@@ -12,6 +12,7 @@
 import time
 import logging
 import json
+import importlib
 import pandas as pd
 import numpy as np
 import geopandas as gpd
@@ -86,6 +87,8 @@ def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
                       add_millan_thickness=False, add_millan_velocity=False,
                       add_hugonnet_dhdt=False, add_bedmachine=False,
                       add_glathida=False,
+                      custom_climate_task=None,
+                      custom_climate_task_kwargs=None,
                       start_level=None, start_base_url=None, max_level=5,
                       logging_level='WORKFLOW',
                       dynamic_spinup=False, err_dmdtda_scaling_factor=0.2,
@@ -169,6 +172,12 @@ def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
     add_glathida : bool
         adds (reprojects) the glathida thickness data to the glacier
         directories. Data points are stored as csv files.
+    custom_climate_task : str
+        optional import path to a custom climate task in the form
+        "module_path:function_name". If provided, it will be called instead of
+        the default process_climate_data.
+    custom_climate_task_kwargs : dict
+        optional kwargs passed to the custom climate task when it is executed.
     start_level : int
         the pre-processed level to start from (default is to start from
         scratch). If set, you'll need to indicate start_base_url as well.
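
For orientation, a module targeted by `custom_climate_task` could look like the sketch below. Everything in it is illustrative rather than part of this change: the package `my_pkg`, the function `process_my_climate_data` and the `baseline_year` keyword are placeholders, and the import location of OGGM's `entity_task` decorator may differ between OGGM versions.

```python
# my_pkg/climate.py -- illustrative sketch, not part of this diff
import logging

from oggm.utils import entity_task  # assumption: decorator location in your OGGM version

log = logging.getLogger(__name__)


@entity_task(log, writes=['climate_historical'])
def process_my_climate_data(gdir, baseline_year=None):
    """Placeholder for a user-provided climate preprocessing task.

    A real task would read its own climate dataset, extract the series at
    the glacier location and write the glacier's climate_historical file
    (see OGGM's built-in climate tasks for the exact calls).
    """
    log.info('Custom climate for %s (baseline_year=%s)', gdir.rgi_id, baseline_year)
```

Such a module would then be selected with `custom_climate_task='my_pkg.climate:process_my_climate_data'`.
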
@@ -629,7 +638,23 @@ def _time_log():
     utils.mkdir(sum_dir)
 
     # Climate
-    workflow.execute_entity_task(tasks.process_climate_data, gdirs)
+    climate_kwargs = custom_climate_task_kwargs or {}
+    if custom_climate_task:
+        try:
+            mod_path, func_name = custom_climate_task.rsplit(':', 1)
+        except ValueError:
+            raise InvalidParamsError('custom_climate_task must be of the form "module:function"')
+        try:
+            mod = importlib.import_module(mod_path)
+        except ModuleNotFoundError as err:
+            raise InvalidParamsError(f'Cannot import module {mod_path}') from err
+        try:
+            custom_task_func = getattr(mod, func_name)
+        except AttributeError as err:
+            raise InvalidParamsError(f'Module {mod_path} has no attribute {func_name}') from err
+        workflow.execute_entity_task(custom_task_func, gdirs, **climate_kwargs)
+    else:
+        workflow.execute_entity_task(tasks.process_climate_data, gdirs)
 
     # Small optim to avoid concurrency
     utils.get_geodetic_mb_dataframe()
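
A note on the design: resolving a plain "module:function" string (the same convention setuptools entry points use) forces the task to be a module-level callable. That is also what multiprocessing runs need, since the callable handed to `workflow.execute_entity_task` is shipped to worker processes by reference; a lambda or a function defined inside another function would not survive that trip.
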
@@ -946,6 +971,11 @@ def parse_args(args):
                         help='adds (reprojects) the glathida point thickness '
                              'observations to the glacier directories. '
                              'The data points are stored as csv.')
+    parser.add_argument('--custom-climate-task', type=str, default=None,
+                        help='Custom climate task import path in the form module:function. '
+                             'If provided, it replaces the default process_climate_data.')
+    parser.add_argument('--custom-climate-task-kwargs', type=json.loads, default=None,
+                        help='JSON dict of kwargs passed to the custom climate task.')
     parser.add_argument('--demo', nargs='?', const=True, default=False,
                         help='if you want to run the prepro for the '
                              'list of demo glaciers.')
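
Assuming the usual `oggm_prepro` entry point, the two new flags would be combined as `--custom-climate-task my_pkg.climate:process_my_climate_data --custom-climate-task-kwargs '{"baseline_year": 2000}'` (module, function and kwargs are made-up examples). Because the kwargs option uses `type=json.loads`, its value must be a valid JSON object, quoted so the shell passes it through unchanged.
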
@@ -1036,6 +1066,8 @@ def parse_args(args):
                 add_hugonnet_dhdt=args.add_hugonnet_dhdt,
                 add_bedmachine=args.add_bedmachine,
                 add_glathida=args.add_glathida,
+                custom_climate_task=args.custom_climate_task,
+                custom_climate_task_kwargs=args.custom_climate_task_kwargs,
                 dynamic_spinup=dynamic_spinup,
                 err_dmdtda_scaling_factor=args.err_dmdtda_scaling_factor,
                 dynamic_spinup_start_year=args.dynamic_spinup_start_year,
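
The same arguments can be passed when driving the preprocessing from Python. A minimal sketch, with every value (region, border, folders, task path, kwargs) a placeholder rather than a recommendation:

```python
# Illustrative call of the extended entry point; values are placeholders.
from oggm.cli.prepro_levels import run_prepro_levels

run_prepro_levels(
    rgi_version='62', rgi_reg='11', border=80,
    output_folder='prepro_out', working_dir='wd',
    custom_climate_task='my_pkg.climate:process_my_climate_data',
    custom_climate_task_kwargs={'baseline_year': 2000},
)
```
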