-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy path: Timestamp_dates.py
106 lines (38 loc) · 1.21 KB
/
Timestamp_dates.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
#!/usr/bin/env python
# coding: utf-8
"""Explore Spark SQL date/timestamp functions on the Apple stock dataset.

Reads appl_stock.csv (Date, Open, Close, ... columns), demonstrates
date-part extraction (dayofmonth, year), then derives a ``Years`` column
and reports the average closing price per year, formatted to 3 decimals.

Exported from a Jupyter notebook; runs as a plain script.
"""
from pyspark.sql import SparkSession
from pyspark.sql.functions import (
    dayofmonth,
    hour,
    dayofyear,
    month,
    year,
    weekofyear,
    format_number,
    date_format,
)

# Hard-coded path to the course's sample dataset.
# NOTE(review): adjust for your local checkout.
DATA_PATH = (
    '/home/tushar/spark-2.3.0-bin-hadoop2.7/python/'
    'Python-and-Spark-for-Big-Data-master/Spark_DataFrames/appl_stock.csv'
)

spark = SparkSession.builder.appName('timestamp').getOrCreate()

# inferSchema=True lets Spark parse the Date column as a timestamp
# rather than a plain string, which the date functions below require.
df = spark.read.csv(DATA_PATH, inferSchema=True, header=True)
df.show()
df.select(['Date', 'Open']).show()

# Date-part extraction examples on the Date column.
df.select(dayofmonth(df['Date'])).show()
df.select(year('Date')).show()

# Derive a Years column from Date, then average every numeric column per
# year and keep only the mean closing price. `.mean()` names the column
# exactly 'avg(Close)' (matching the source column's casing).
newdf = df.withColumn('Years', year('Date'))
result = newdf.groupBy('Years').mean().select(['Years', 'avg(Close)'])
result.show()

# Round the yearly average to 3 decimals under a friendlier column name.
result.select(
    ['Years', format_number('avg(Close)', 3).alias('Closing')]
).show()

# Release the driver/executor resources held by the session.
spark.stop()