Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions .github/workflows/black.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# CI workflow: run the Black code formatter in check-only mode on every push.
name: Black

on: push

jobs:
  black:
    # Pinned runner image for reproducibility.
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      # psf/black@stable runs black; "--check" fails the job on
      # unformatted files without modifying them.
      - uses: psf/black@stable
        with:
          options: "--check"
21 changes: 13 additions & 8 deletions .github/workflows/pypi-publish.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ on:
push:
tags:
- '[0-9]+.[0-9]+.[0-9]+'
- '[0-9]+.[0-9]+.[0-9]+\.dev[0-9]+'

jobs:
publish:
Expand All @@ -16,14 +17,18 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: '3.12'
- name: Install dependencies
- name: Install Poetry
run: |
python -m pip install --upgrade pip
pip install setuptools wheel twine sphinx
- name: Build and publish
curl -sSL https://install.python-poetry.org | python3 -
- name: Install and publish project
env:
TWINE_USERNAME: ${{ secrets.pcic_at_pypi_username }}
TWINE_PASSWORD: ${{ secrets.pcic_at_pypi_password }}
PCIC_PYPI_USERNAME: ${{ secrets.pcic_at_pypi_username }}
PCIC_PYPI_PASSWORD: ${{ secrets.pcic_at_pypi_password }}
run: |
python setup.py sdist bdist_wheel
twine upload --repository-url https://pypi.pacificclimate.org/ --skip-existing -u $TWINE_USERNAME -p $TWINE_PASSWORD dist/*
# Configure Poetry to publish to PCIC private package repository
poetry config repositories.pcic https://pypi.pacificclimate.org/
poetry config http-basic.pcic $PCIC_PYPI_USERNAME $PCIC_PYPI_PASSWORD
# Install, build and publish
# poetry install
poetry build
poetry publish -r pcic
11 changes: 6 additions & 5 deletions ce/api/__init__.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
""" PCIC Climate Explorer backend API module
"""PCIC Climate Explorer backend API module

.. moduleauthor:: James Hiebert <hiebert@uvic.ca>

"""


import inspect
from datetime import datetime

Expand Down Expand Up @@ -169,8 +168,10 @@ def format_dates(obj):
if not isinstance(obj, dict):
return obj
return {
key: val.strftime(time_format)
if isinstance(val, datetime)
else format_dates(val)
key: (
val.strftime(time_format)
if isinstance(val, datetime)
else format_dates(val)
)
for key, val in obj.items()
}
7 changes: 3 additions & 4 deletions ce/api/data.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
"""module for requesting data across multiple files through the API
"""
"""module for requesting data across multiple files through the API"""

import numpy as np
import os
Expand Down Expand Up @@ -61,9 +60,9 @@ def data(
climatological_statistic (str): Statistical operation applied to variable in a
climatological dataset (e.g "mean", "standard_deviation",
"percentile"). Defaulted to "mean".

percentile (float): if climatological_statistic is "percentile", specifies what
percentile value to use. A percentile value must be provided if the
percentile value to use. A percentile value must be provided if the
climatological_statistic is "percentile".

is_thredds (bool): If set to `True` the filepath will be searched for
Expand Down
2 changes: 1 addition & 1 deletion ce/api/geo.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ def polygon_to_mask(nc, resource, poly, variable):

def make_masked_file_key(nc, resource, wkt, varname):
    """Build a key that characterizes a masked netCDF file.

    The key is the (filename, polygon WKT) pair. The ``nc`` and
    ``varname`` arguments are accepted for interface compatibility
    but do not contribute to the key.
    """
    cache_key = (resource, wkt)
    return cache_key


Expand Down
3 changes: 1 addition & 2 deletions ce/api/grid.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
"""module for requesting the lat/lon grid for a given model run file
"""
"""module for requesting the lat/lon grid for a given model run file"""

from sqlalchemy.orm.exc import NoResultFound

Expand Down
7 changes: 4 additions & 3 deletions ce/api/health/regions.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""module for requesting a summary of stored region data status."""

import os
from csv import DictReader
from ce.api.multimeta import multimeta
Expand All @@ -8,9 +9,9 @@

def region_status(region, metadata):
"""Opens a stored data file for a region, checks the modtime of the
stored data against the current versions of each file (as listed
in the metadata) and returns a list of the each file from which
stored data was calculated and its status."""
stored data against the current versions of each file (as listed
in the metadata) and returns a list of each file from which
stored data was calculated and its status."""

date_format = "%Y-%m-%dT%H:%M:%SZ"

Expand Down
4 changes: 1 addition & 3 deletions ce/api/lister.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
"""module for requesting unique_ids from ensemble or model short name
"""
"""module for requesting unique_ids from ensemble or model short name"""

from modelmeta import Ensemble


def lister(sesh, ensemble_name="ce_files", model=None):

"""
Args
sesh (sqlalchemy.orm.session.Session): A database Session object
Expand Down
12 changes: 9 additions & 3 deletions ce/api/metadata.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""module for requesting metadata for one single file through the API
"""
"""module for requesting metadata for one single file through the API"""

from sqlalchemy.orm.exc import NoResultFound

from modelmeta import DataFile
Expand Down Expand Up @@ -131,4 +131,10 @@ def metadata(sesh, model_id, extras=""):
}
)

return {model_id: {**base_values, **requested_extra_values, **time_values,}}
return {
model_id: {
**base_values,
**requested_extra_values,
**time_values,
}
}
4 changes: 1 addition & 3 deletions ce/api/models.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
"""module for requesting list of available models
"""
"""module for requesting list of available models"""

from modelmeta import Ensemble


def models(sesh, ensemble_name="ce_files"):

"""
Args
sesh (sqlalchemy.orm.session.Session): A database Session object
Expand Down
5 changes: 2 additions & 3 deletions ce/api/multimeta.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
"""module for requesting metadata from multiple files based on model or ensemble
"""
"""module for requesting metadata from multiple files based on model or ensemble"""

from modelmeta import DataFile, Model, Emission, Run
from modelmeta import DataFileVariableGridded, VariableAlias, TimeSet
Expand Down Expand Up @@ -47,7 +46,7 @@ def multimeta(
climatological_statistic(str): Statistical operation applied to variable in a
climatological dataset (e.g "mean", "standard_deviation",
"percentile"). Defaulted to "mean".

percentile(float): optionally, specify a single percentile value to filter on.

Returns:
Expand Down
3 changes: 1 addition & 2 deletions ce/api/multistats.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
"""module for requesting stats from multiple files based on model or ensemble
"""
"""module for requesting stats from multiple files based on model or ensemble"""

from ce.api.stats import stats
from ce.api.util import search_for_unique_ids
Expand Down
12 changes: 8 additions & 4 deletions ce/api/stats.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
"""module for requsting summary statistics, averaged across a region
"""
"""module for requesting summary statistics, averaged across a region"""

import numpy as np
import numpy.ma as ma
Expand All @@ -26,7 +25,12 @@


def stats(
sesh, id_, time, area, variable, is_thredds=False,
sesh,
id_,
time,
area,
variable,
is_thredds=False,
):
"""Request and calculate summary statistics averaged across a region

Expand Down Expand Up @@ -133,7 +137,7 @@ def stats(

def array_stats(array):
"""Return the min, max, mean, median, standard deviation and number
of cells of a 3d data grid (numpy.ma.MaskedArray)
of cells of a 3d data grid (numpy.ma.MaskedArray)
"""
return {
"min": np.min(array).item(),
Expand Down
22 changes: 13 additions & 9 deletions ce/api/streamflow/downstream.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
spatial tuple to a data index tuple and vice versa, also switch the
dimension order accordingly.
"""

from gettext import dpgettext
from this import d
from contexttimer import Timer
Expand Down Expand Up @@ -53,7 +54,7 @@ def downstream(sesh, station, ensemble_name):
and converting their contents to `VicDataGrid` objects for consumption by
`downstream_worker`, which as its name suggests, does most of the work.
"""

station_lonlat = setup(station)

with get_time_invariant_variable_dataset(
Expand All @@ -75,7 +76,8 @@ def downstream(sesh, station, ensemble_name):


def downstream_worker(
station_lonlat, flow_direction,
station_lonlat,
flow_direction,
):
"""Compute the watershed.

Expand Down Expand Up @@ -136,16 +138,16 @@ def build_downstream_watershed(target, routing, direction_map, debug=False):
represented by an offset of +1 or -1, respectively.
:param debug: Boolean indicating whether this function should compute
and return debug information.
:return: Tuple of cells (cell indices as tuples) that drain from `target` in
:return: Tuple of cells (cell indices as tuples) that drain from `target` in
a downstream flow order.

Notes:

- In this function, a cell is represented by an (x, y) index pair.

- Routing graphs can and in practice do contain cycles. Variable
`stream` is used to check whether a cell has already been
visited during the traversal of the routing graph, i.e., whether we
- Routing graphs can and in practice do contain cycles. Variable
`stream` is used to check whether a cell has already been
visited during the traversal of the routing graph, i.e., whether we
are cycling, and if so not to repeat that subgraph.
"""

Expand All @@ -156,10 +158,12 @@ def downstream(stream):
cell_routing = routing[stream[-1]]
downstream_neighbour = vec_add(stream[-1], direction_map[int(cell_routing)])

if downstream_neighbour in stream or not is_valid_index(downstream_neighbour, routing.shape):
if downstream_neighbour in stream or not is_valid_index(
downstream_neighbour, routing.shape
):
return stream

stream += (downstream_neighbour,)
return downstream(stream)
return downstream((target,))

return downstream((target,))
2 changes: 1 addition & 1 deletion ce/api/streamflow/shared.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def is_downstream(neighbour, cell, routing, direction_map):


def VIC_direction_matrix(lat_step, lon_step):
""" Return a VIC direction matrix, which is a matrix indexed by the VIC
"""Return a VIC direction matrix, which is a matrix indexed by the VIC
streamflow direction codes 0...9, with the value at index `i` indicating
the offsets from the data index in a streamflow file required to
step in that streamflow direction.
Expand Down
32 changes: 20 additions & 12 deletions ce/api/streamflow/watershed.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
spatial tuple to a data index tuple and vice versa, also switch the
dimension order accordingly.
"""

import math
from contexttimer import Timer

Expand Down Expand Up @@ -59,17 +60,21 @@ def watershed(sesh, station, ensemble_name):
"""
station_lonlat = setup(station)

with get_time_invariant_variable_dataset(
sesh, ensemble_name, "flow_direction"
) as flow_direction_ds, get_time_invariant_variable_dataset(
sesh, ensemble_name, "elev"
) as elevation_ds, get_time_invariant_variable_dataset(
sesh, ensemble_name, "elevmin"
) as elevation_min_ds, get_time_invariant_variable_dataset(
sesh, ensemble_name, "elevmax"
) as elevation_max_ds, get_time_invariant_variable_dataset(
sesh, ensemble_name, "area"
) as area_ds:
with (
get_time_invariant_variable_dataset(
sesh, ensemble_name, "flow_direction"
) as flow_direction_ds,
get_time_invariant_variable_dataset(
sesh, ensemble_name, "elev"
) as elevation_ds,
get_time_invariant_variable_dataset(
sesh, ensemble_name, "elevmin"
) as elevation_min_ds,
get_time_invariant_variable_dataset(
sesh, ensemble_name, "elevmax"
) as elevation_max_ds,
get_time_invariant_variable_dataset(sesh, ensemble_name, "area") as area_ds,
):
try:
return worker(
station_lonlat,
Expand Down Expand Up @@ -218,7 +223,10 @@ def worker(
"elevation_units": elevation_mean.units,
"area_units": area.units,
},
"melton_ratio": {"units": "km/km", "value": m_ratio,},
"melton_ratio": {
"units": "km/km",
"value": m_ratio,
},
"boundary": geojson_feature(
outline,
properties={
Expand Down
Loading
Loading