# cancellations_over_years.py
# Per-year percentage breakdown of flight-cancellation reasons in the
# airline on-time dataset (cancellation codes were recorded 2003-2008).
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.functions import col, lit, sum, round
from pyspark.sql.types import IntegerType

# Get (or create) the shared Spark session for this job.
spark = SparkSession.builder.getOrCreate()

# Load the full airline on-time performance dataset from HDFS.
df = spark.read.parquet("/user/s2878755/airline.parquet")

# Exploration step (kept for reference): find the years in which a
# cancellation code was actually registered.
#
# df1 = df.filter(~col('CancellationCode').contains('NA')) \
#         .select('Year', lit(1).alias('Count')) \
#         .groupBy('Year').agg(sum('Count').alias('Count')) \
#         .show()

# Cancellation was registered 2003-2008, but very few 2003. Check the ratio difference between 2004 and 2008:

# Keep only flights from 2004 onward that were cancelled and that carry a
# real cancellation code ('NA' marks a missing/unregistered code here).
df = df.filter((col('Year') >= 2004) & (col('Cancelled') == 1) & ~col('CancellationCode').contains('NA'))

# Cancellations per (Year, CancellationCode). F.count('*') counts rows
# directly — no need to attach a literal-1 column and sum it.
df1 = df.groupBy('Year', 'CancellationCode').agg(F.count('*').alias('Cancellations'))

# Total cancellations per Year (denominator for the percentage).
df2 = df.groupBy('Year').agg(F.count('*').alias('TotCancellations'))

# Join per-code counts with per-year totals and express each code's share
# as a percentage of that year's cancellations. Joining on the column name
# (rather than df1.Year == df2.Year) avoids a duplicated 'Year' column.
df = df1.join(df2, 'Year') \
        .select('Year', 'CancellationCode', (100*col('Cancellations')/col('TotCancellations')).alias('Percentage')) \
        .sort(col('Year'))
# Persist the result. Spark writes a directory of part files at this path;
# the original path ended in '.py' (the script's own filename pasted by
# mistake) — fixed to a '.csv' suffix so the output is not mistaken for code.
df.write.csv('/user/s2878755/cancellations_over_years.csv')