@@ -207,51 +207,21 @@ def uncertainty(self,
207207 self .calc_eai_exp = calc_eai_exp
208208 self .calc_at_event = calc_at_event
209209
210- start = time .time ()
211210 one_sample = samples_df .iloc [0 :1 ]
212- p_iterator = _sample_parallel_iterator (
213- samples = one_sample ,
214- chunksize = chunksize ,
215- exp_input_var = self .exp_input_var ,
216- impf_input_var = self .impf_input_var ,
217- haz_input_var = self .haz_input_var ,
218- rp = rp ,
219- calc_eai_exp = calc_eai_exp ,
220- calc_at_event = calc_at_event
211+ start = time .time ()
212+ self ._compute_imp_metrics (
213+ one_sample , rp , calc_eai_exp , calc_at_event , chunksize = 1 , processes = 1
221214 )
222- imp_metrics = itertools .starmap (_map_impact_calc , p_iterator )
223- [aai_agg_list , freq_curve_list ,
224- eai_exp_list , at_event_list ] = _transpose_chunked_data (imp_metrics )
225215 elapsed_time = (time .time () - start )
226216 self .est_comp_time (unc_sample .n_samples , elapsed_time , processes )
227217
228- #Compute impact distributions
229- with log_level (level = 'ERROR' , name_prefix = 'climada' ):
230- p_iterator = _sample_parallel_iterator (
231- samples = samples_df ,
232- chunksize = chunksize ,
233- exp_input_var = self .exp_input_var ,
234- impf_input_var = self .impf_input_var ,
235- haz_input_var = self .haz_input_var ,
236- rp = rp ,
237- calc_eai_exp = calc_eai_exp ,
238- calc_at_event = calc_at_event ,
218+ [aai_agg_list ,
219+ freq_curve_list ,
220+ eai_exp_list ,
221+ at_event_list ] = self ._compute_imp_metrics (
222+ samples_df , rp , calc_eai_exp , calc_at_event ,
223+ chunksize = chunksize , processes = processes
239224 )
240- if processes > 1 :
241- with mp .Pool (processes = processes ) as pool :
242- LOGGER .info ('Using %s CPUs.' , processes )
243- imp_metrics = pool .starmap (
244- _map_impact_calc , p_iterator
245- )
246- else :
247- imp_metrics = itertools .starmap (
248- _map_impact_calc , p_iterator
249- )
250-
251- #Perform the actual computation
252- with log_level (level = 'ERROR' , name_prefix = 'climada' ):
253- [aai_agg_list , freq_curve_list ,
254- eai_exp_list , at_event_list ] = _transpose_chunked_data (imp_metrics )
255225
256226 # Assign computed impact distribution data to self
257227 aai_agg_unc_df = pd .DataFrame (aai_agg_list ,
@@ -280,6 +250,36 @@ def uncertainty(self,
280250 coord_df = coord_df
281251 )
282252
253+ def _compute_imp_metrics (
254+ self , samples_df , rp , calc_eai_exp , calc_at_event , chunksize , processes
255+ ):
256+ #Compute impact distributions
257+ with log_level (level = 'ERROR' , name_prefix = 'climada' ):
258+ p_iterator = _sample_parallel_iterator (
259+ samples = samples_df ,
260+ chunksize = chunksize ,
261+ exp_input_var = self .exp_input_var ,
262+ impf_input_var = self .impf_input_var ,
263+ haz_input_var = self .haz_input_var ,
264+ rp = rp ,
265+ calc_eai_exp = calc_eai_exp ,
266+ calc_at_event = calc_at_event ,
267+ )
268+ if processes > 1 :
269+ with mp .Pool (processes = processes ) as pool :
270+ LOGGER .info ('Using %s CPUs.' , processes )
271+ imp_metrics = pool .starmap (
272+ _map_impact_calc , p_iterator
273+ )
274+ else :
275+ imp_metrics = itertools .starmap (
276+ _map_impact_calc , p_iterator
277+ )
278+
279+ #Perform the actual computation
280+ with log_level (level = 'ERROR' , name_prefix = 'climada' ):
281+ return _transpose_chunked_data (imp_metrics )
282+
283283
284284def _map_impact_calc (
285285 sample_chunks , exp_input_var , impf_input_var , haz_input_var ,