@@ -219,7 +219,7 @@ def parfile_relations(self):
             if x is not None
             else lb_max["lbound"]
         )
-        pr["zero_based"] = self.zero_based
+        pr["zero_based"] = self.zero_based  # todo -- chase this out if going to file specific zero based def
         return pr

    def _generic_get_xy(self, args, **kwargs):
@@ -968,7 +968,7 @@ def _par_prep(
        sep = " "
        if rel_filepath.suffix.lower() == ".csv":
            sep = ","
-        if df.columns.is_integer():
+        if pd.api.types.is_integer_dtype(df.columns):  # df.columns.is_integer(): # really!???
            hheader = False
        else:
            hheader = df.columns
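
> Reviewer note on the dtype-check swap above: `pd.api.types.is_integer_dtype` is the public pandas API for this test, whereas `Index.is_integer()` has been deprecated in recent pandas. A minimal, self-contained illustration (the DataFrames and column names here are invented, not from pyemu):

```python
import pandas as pd

# A table read with header=None gets a RangeIndex for its columns,
# i.e. integer dtype -- there is no real header to carry through.
df_nohead = pd.DataFrame([[1.0, 2.0]], columns=range(2))
print(pd.api.types.is_integer_dtype(df_nohead.columns))  # True -> hheader = False

# Named (string) columns are not integer dtype -> keep them as the header.
df_named = pd.DataFrame([[1.0, 2.0]], columns=["kper", "flux"])
print(pd.api.types.is_integer_dtype(df_named.columns))  # False -> hheader = df.columns
```
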
@@ -1912,7 +1912,9 @@ def add_parameters(
            par_style = par_style[0]
        if par_style not in ["m", "d", "a"]:
            self.logger.lraise(
-                "add_parameters(): unrecognized 'style': {0}, should be either 'm'/'mult'/'multiplier', 'a'/'add'/'addend' or 'd'/'direct'".format(
+                "add_parameters(): unrecognized 'style': {0}, "
+                "should be either 'm'/'mult'/'multiplier', "
+                "'a'/'add'/'addend' or 'd'/'direct'".format(
                    par_style
                )
            )
@@ -2138,7 +2140,7 @@ def add_parameters(

        pp_filename = None  # setup placeholder variables
        fac_filename = None
-
+        nxs = None
        # Process model parameter files to produce appropriate pest pars
        if index_cols is not None:  # Assume list/tabular type input files
            # ensure inputs are provided for all required cols
@@ -2167,7 +2169,7 @@ def add_parameters(
                par_type.startswith("grid") or par_type.startswith("p")
            ) and geostruct is not None:
                get_xy = self.get_xy
-            df = write_list_tpl(
+            df, nxs = write_list_tpl(
                filenames,
                dfs,
                par_name_base,
@@ -2189,6 +2191,7 @@ def add_parameters(
                fill_value=initial_value,
                logger=self.logger,
            )
+            nxs = {fname: nx for fname, nx in zip(filenames, nxs)}
            assert (
                np.mod(len(df), len(use_cols)) == 0.0
            ), "Parameter dataframe wrong shape for number of cols {0}" "".format(
@@ -2273,14 +2276,13 @@ def add_parameters(
                structured = True
                for mod_file, ar in file_dict.items():
                    orgdata = ar.shape
-                    if spatial_reference_type == 'vertex':
+                    if spatial_reference_type == 'vertex':
                        assert orgdata[0] == spatial_reference.ncpl, (
                            "Spatial reference ncpl not equal to original data ncpl for\n"
                            + os.path.join(
                                *os.path.split(self.original_file_d)[1:], mod_file
                            )
                        )
-
                    else:
                        assert orgdata[0] == spatial_reference.nrow, (
                            "Spatial reference nrow not equal to original data nrow for\n"
@@ -2643,7 +2645,7 @@ def add_parameters(
            zone_filename = zone_filename.name

        relate_parfiles = []
-        for mod_file in file_dict.keys():
+        for mod_file, pdf in file_dict.items():
            mult_dict = {
                "org_file": Path(self.original_file_d.name, mod_file.name),
                "model_file": mod_file,
@@ -2655,8 +2657,9 @@ def add_parameters(
                "upper_bound": ult_ubound,
                "lower_bound": ult_lbound,
                "operator": par_style,
-                "chkpar": len(df)
            }
+            if nxs:
+                mult_dict["chkpar"] = nxs[mod_file]
            if par_style in ["m", "a"]:
                mult_dict["mlt_file"] = Path(self.mult_file_d.name, mlt_filename)

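> Reviewer note: `chkpar` now records the expected number of unique index rows per model file (from `nxs`) instead of the length of the combined parameter dataframe, so a downstream check can no longer pass on one file's count while another file is short. Roughly the mechanic, with invented filenames and counts:

```python
# Sketch of the per-file bookkeeping (names and numbers are made up):
filenames = ["wel_sp1.csv", "wel_sp2.csv"]
nxs = {f: n for f, n in zip(filenames, [5, 3])}  # as built after write_list_tpl()

mult_dict = {"model_file": "wel_sp1.csv", "operator": "m"}
if nxs:
    mult_dict["chkpar"] = nxs[mult_dict["model_file"]]  # 5, not len(df) == 8
```
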
@@ -3094,7 +3097,7 @@ def write_list_tpl(
    # get dataframe with autogenerated parnames based on `name`, `index_cols`,
    # `use_cols`, `suffix` and `par_type`
    if par_style == "d":
-        df_tpl = _write_direct_df_tpl(
+        df_tpl, nxs = _write_direct_df_tpl(
            filenames[0],
            tpl_filename,
            dfs[0],
@@ -3130,8 +3133,8 @@ def write_list_tpl(
            par_fill_value=fill_value,
            par_style=par_style,
        )
-        idxs = [df.loc[:, index_cols].values.tolist() for df in dfs]
-        use_rows = _get_use_rows(
+        idxs = [[tuple(s) for s in df.loc[:, index_cols].values] for df in dfs]
+        use_rows, nxs = _get_use_rows(
            df_tpl, idxs, use_rows, zero_based, tpl_filename, logger=logger
        )
        df_tpl = df_tpl.loc[use_rows, :]  # direct pars done in direct function
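
> Reviewer note: converting the index rows to tuples (rather than `values.tolist()`, which yields nested lists) matters because `_get_use_rows` now puts them into `set`s, and lists are unhashable. A quick standalone demonstration:

```python
import numpy as np

vals = np.array([[1, 1], [1, 2], [2, 1]])  # stand-in for df.loc[:, index_cols].values
# set(vals.tolist()) would raise TypeError: unhashable type: 'list'
idxs = [tuple(s) for s in vals]            # [(1, 1), (1, 2), (2, 1)]
print(len(set(idxs)))                      # 3 -- hashable, so set ops work
```
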
@@ -3227,7 +3230,7 @@ def write_list_tpl(
    df_par.loc[:, "tpl_filename"] = tpl_filename
    df_par.loc[:, "input_filename"] = input_filename
    df_par.loc[:, "parval1"] = parval
-    return df_par
+    return df_par, nxs


def _write_direct_df_tpl(
@@ -3311,8 +3314,8 @@ def _write_direct_df_tpl(
        init_df=df,
        init_fname=in_filename,
    )
-    idxs = df.loc[:, index_cols].values.tolist()
-    use_rows = _get_use_rows(
+    idxs = [tuple(s) for s in df.loc[:, index_cols].values]
+    use_rows, nxs = _get_use_rows(
        df_ti, [idxs], use_rows, zero_based, tpl_filename, logger=logger
    )
    df_ti = df_ti.loc[use_rows]
@@ -3325,7 +3328,7 @@ def _write_direct_df_tpl(
    pyemu.helpers._write_df_tpl(
        tpl_filename, direct_tpl_df, index=False, header=header, headerlines=headerlines
    )
-    return df_ti
+    return df_ti, nxs


def _get_use_rows(tpldf, idxcolvals, use_rows, zero_based, fnme, logger=None):
@@ -3345,19 +3348,23 @@ def _get_use_rows(tpldf, idxcolvals, use_rows, zero_based, fnme, logger=None):
    """
    if use_rows is None:
        use_rows = tpldf.index
-        return use_rows
+        nxs = [len(set(idx)) for idx in idxcolvals]
+        return use_rows, nxs
    if np.ndim(use_rows) == 0:
        use_rows = [use_rows]
    if np.ndim(use_rows) == 1:  # assume we have collection of int that describe iloc
        use_rows = [idx[i] for i in use_rows for idx in idxcolvals]
+    else:
+        use_rows = [tuple(r) for r in use_rows]
+    nxs = [len(set(use_rows).intersection(idx)) for idx in idxcolvals]
+    orig_use_rows = use_rows.copy()
    if not zero_based:  # assume passed indicies are 1 based
        use_rows = [
-            tuple([i - 1 if isinstance(i, int) else i for i in r])
+            tuple([i - 1 if isinstance(i, (int, np.integer)) else i for i in r])
            if not isinstance(r, str)
            else r
            for r in use_rows
        ]
-    orig_use_rows = use_rows
    use_rows = set(use_rows)
    sel = tpldf.sidx.isin(use_rows) | tpldf.idx_strs.isin(use_rows)
    if not sel.any():  # use_rows must be ints
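
> Reviewer note: two details worth calling out in this hunk. First, `nxs` counts how many of the requested rows actually exist in each file's index via set intersection. Second, the `isinstance` check must include `np.integer`, because values pulled out of numpy arrays are numpy scalars, not Python `int`s, so the old check silently skipped the 1-based-to-0-based shift. A small sketch with invented indices:

```python
import numpy as np

idxcolvals = [
    [(1, 1), (1, 2), (2, 1)],  # index tuples from file 1
    [(1, 1), (2, 1)],          # index tuples from file 2
]
use_rows = [(1, 1), (2, 1), (9, 9)]  # (9, 9) matches nothing
nxs = [len(set(use_rows).intersection(idx)) for idx in idxcolvals]
print(nxs)  # [2, 2] -- later zipped with filenames to drive the chkpar checks

i = np.int64(2)                          # what indexing a numpy int array returns
print(isinstance(i, int))                # False -> old 1-based shift skipped it
print(isinstance(i, (int, np.integer)))  # True  -> new check catches it
```
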
@@ -3387,7 +3394,7 @@ def _get_use_rows(tpldf, idxcolvals, use_rows, zero_based, fnme, logger=None):
    else:
        warnings.warn(msg, PyemuWarning)
        use_rows = tpldf.index
-    return use_rows
+    return use_rows, nxs


def _get_index_strfmt(index_cols):
@@ -3590,7 +3597,6 @@ def _get_tpl_or_ins_df(
    Private method to auto-generate parameter or obs names from tabular
    model files (input or output) read into pandas dataframes
    Args:
-        filenames (`str` or `list` of str`): filenames
        dfs (`pandas.DataFrame` or `list`): DataFrames (can be list of DataFrames)
            to set up parameters or observations
        name (`str`): Parameter name or Observation name prefix
@@ -3633,8 +3639,6 @@ def _get_tpl_or_ins_df(

    # work out the union of indices across all dfs
    if typ != "obs":
-
-
        sidx = set()
        for df in dfs:
            # looses ordering