@@ -23,40 +23,23 @@ def parse_mokelumne_flow(fname):
23
23
return date , float (val .replace ("," , "" ))
24
24
25
25
26
- def update_existing (date , value , fname ):
27
- name = fname .split (".csv" )[0 ]
28
- if not len (glob .glob (f"{ name } *.csv" )):
29
- print (f"{ fname } does not exist! Starting a new file named: { fname } " )
30
- df = pd .DataFrame (columns = ["Date" , "Value" ])
31
- df = df .set_index ("Date" )
32
- df = df .astype ("float" )
33
- else :
34
- df = utils .read_by_years (name )
35
- #
36
- dfnew = pd .DataFrame (
37
- [[float (value )]], columns = df .columns , index = [pd .to_datetime (date )]
38
- )
39
- dfnew .index .name = "Date"
40
- # update the new data frame values
41
- df = pd .concat ([df , dfnew ])
42
- # if duplicate indexes, keep the last one (latest)
43
- df = df [~ df .index .duplicated (keep = "last" )]
44
- df = df .sort_index ()
45
- utils .store_by_years (df , name )
46
-
47
-
48
26
def update_last_7days(
    fname="mokelumne_flow.csv",
    raw_dir="raw",
    converted_dir=".",
):
    """Rebuild the Mokelumne flow CSV from the last 7 daily reports.

    Parses the downloaded raw report for each of the previous 7 days,
    collects the (date, value) pairs, and writes them out as a
    date-indexed, date-sorted CSV.

    Parameters
    ----------
    fname : str
        Name of the output CSV file.
    raw_dir : str
        Directory containing the downloaded raw reports.
    converted_dir : str
        Directory the converted CSV is written into.
    """
    today = datetime.datetime.now()
    vals = []
    for i in range(7):
        # Reports cover the previous day, so start at today - 1 day.
        report_date = today - datetime.timedelta(days=(i + 1))
        date_str = report_date.strftime("%m/%d/%Y")
        download_fname = build_filename(date_str, raw_dir)
        # parse_mokelumne_flow returns a (date, value) pair.
        rvals = parse_mokelumne_flow(download_fname)
        vals.append(rvals)
    df = pd.DataFrame(vals, columns=["Date", "Value"])
    df.set_index("Date", inplace=True)
    df.sort_index(inplace=True)
    # BUG FIX: honor converted_dir — it was accepted but ignored, so the
    # CSV always landed in the CWD regardless of the argument. With the
    # default ".", os.path.join(".", fname) writes to the same place as
    # before, so default-argument callers are unaffected.
    df.to_csv(os.path.join(converted_dir, fname))
60
43
61
44
62
45
def save_report (date_str , base_dir ):
0 commit comments