Commit 92efe33

feat: ui, support disable cache (#1217)
* fix: (drop) add cache_enable for functions in ui.utils
* fix: (drop) handle no logging, rename sota_exp to last_sota_exp for clarity in running_win
1 parent af9068c commit 92efe33
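The first bullet of the commit message boils down to one pattern, repeated for every cached helper in rdagent/log/ui/utils.py: drop the unconditional `@cache_with_pickle` decorator and re-wrap the function after its definition only when `UI_SETTING.enable_cache` is true. The sketch below shows that pattern in isolation; `cache_with_pickle` and `UI_SETTING` are toy stand-ins written for this example, so it illustrates the shape of the change rather than the repository's actual implementation.

```python
# Illustrative sketch only: cache_with_pickle and UI_SETTING are stand-ins
# for rdagent's real decorator and settings object.
from __future__ import annotations

import functools
from pathlib import Path


class _UISetting:
    enable_cache: bool = True  # assumed boolean switch, mirroring UI_SETTING.enable_cache


UI_SETTING = _UISetting()


def cache_with_pickle(hash_func, force: bool = False):
    """Toy stand-in: memoize by hash_func(*args); the real decorator persists pickles on disk."""

    def decorator(fn):
        memo: dict[str, object] = {}

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key = hash_func(*args, **kwargs)
            if key not in memo:
                memo[key] = fn(*args, **kwargs)
            return memo[key]

        return wrapper

    return decorator


def _log_path_hash_func(log_path: Path) -> str:
    # Cache key derived from the log path (the real hash also reflects on-disk state).
    return str(log_path)


def get_best_report(log_path: Path) -> dict | None:
    # Placeholder for the expensive log scanning the real function performs.
    return {"log": str(log_path)}


# The commit's pattern: decorate after the definition, and only when caching is on,
# so the plain, uncached function is used when UI_SETTING.enable_cache is False.
if UI_SETTING.enable_cache:
    get_best_report = cache_with_pickle(_log_path_hash_func, force=True)(get_best_report)


print(get_best_report(Path("/tmp/log")))  # a repeated call with the same path hits the memo
```

Because the wrapping happens at module import, toggling `UI_SETTING.enable_cache` afterwards does not un-wrap an already decorated function; the flag is read once when the module loads.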

File tree

2 files changed: +31 -11 lines

rdagent/log/ui/ds_trace.py
rdagent/log/ui/utils.py

rdagent/log/ui/ds_trace.py

Lines changed: 7 additions & 5 deletions
@@ -475,7 +475,7 @@ def coding_win(data, base_exp, llm_data: dict | None = None):
     workspace_win(data["no_tag"].experiment_workspace)


-def running_win(data, base_exp, llm_data=None, sota_exp=None):
+def running_win(data, base_exp, llm_data=None, last_sota_exp=None):
     st.header("Running", divider="blue", anchor="running")
     if llm_data is not None:
         common_llm_data = llm_data.pop("no_tag", [])
@@ -491,7 +491,7 @@ def running_win(data, base_exp, llm_data=None, sota_exp=None):
     st.subheader("Exp Workspace (running final)")
     workspace_win(
         data["no_tag"].experiment_workspace,
-        cmp_workspace=sota_exp.experiment_workspace if sota_exp else None,
+        cmp_workspace=last_sota_exp.experiment_workspace if last_sota_exp else None,
         cmp_name="last SOTA(to_submit)",
     )
     st.subheader("Result")
@@ -555,7 +555,7 @@ def main_win(loop_id, llm_data=None):
         )
     if "running" in loop_data:
         # get last SOTA_exp_to_submit
-        sota_exp = None
+        last_sota_exp = None
         if "record" in loop_data:
             current_trace = loop_data["record"]["trace"]
             current_selection = current_trace.get_current_selection()
@@ -565,13 +565,15 @@ def main_win(loop_id, llm_data=None):
             if len(parent_idxs) >= 2 and hasattr(current_trace, "idx2loop_id"):
                 parent_idx = parent_idxs[-2]
                 parent_loop_id = current_trace.idx2loop_id[parent_idx]
-                sota_exp = state.data[parent_loop_id]["record"].get("sota_exp_to_submit", None)
+                if parent_loop_id in state.data:
+                    # in some cases, the state.data is synthesized, logs does not necessarily exist
+                    last_sota_exp = state.data[parent_loop_id]["record"].get("sota_exp_to_submit", None)

         running_win(
             loop_data["running"],
             base_exp=loop_data["coding"].get("no_tag", None),
             llm_data=llm_data["running"] if llm_data else None,
-            sota_exp=sota_exp,
+            last_sota_exp=last_sota_exp,
         )
     if "feedback" in loop_data:
         feedback_win(loop_data["feedback"], llm_data.get("feedback", None) if llm_data else None)
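The second bullet of the commit message shows up in the last two hunks above: main_win now reads the parent loop's sota_exp_to_submit only if that loop is actually present in state.data, because the state may be synthesized and the corresponding log record does not necessarily exist. Below is a self-contained sketch of the same defensive lookup, with state.data reduced to a plain dict of hypothetical data rather than the UI's real state object.

```python
# Hypothetical data standing in for state.data; the guard mirrors the diff above.
state_data: dict[int, dict] = {
    3: {"record": {"sota_exp_to_submit": "exp-at-loop-3"}},
    # loop 5 was synthesized, so it has no logged record at all
}

parent_loop_id = 5

last_sota_exp = None
if parent_loop_id in state_data:
    # Only read the record when the parent loop's logs actually exist.
    last_sota_exp = state_data[parent_loop_id]["record"].get("sota_exp_to_submit", None)

print(last_sota_exp)  # None: the missing loop no longer raises a KeyError
```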

rdagent/log/ui/utils.py

Lines changed: 24 additions & 6 deletions
@@ -161,7 +161,6 @@ def map_stat(sota_mle_score: dict | None) -> str:
     return sota_exp_stat


-@cache_with_pickle(_log_path_hash_func, force=True)
 def get_best_report(log_path: Path) -> dict | None:
     log_storage = FileStorage(log_path)
     mle_reports = [extract_json(i.content) for i in log_storage.iter_msg(pattern="**/running/mle_score/*/*.pkl")]
@@ -176,11 +175,14 @@ def get_best_report(log_path: Path) -> dict | None:
     return None


+if UI_SETTING.enable_cache:
+    get_best_report = cache_with_pickle(_log_path_hash_func, force=True)(get_best_report)
+
+
 def _get_sota_exp_stat_hash_func(log_path: Path, selector: Literal["auto", "best_valid"] = "auto") -> str:
     return _log_path_hash_func(log_path) + selector


-@cache_with_pickle(_get_sota_exp_stat_hash_func, force=True)
 def get_sota_exp_stat(
     log_path: Path, selector: Literal["auto", "best_valid"] = "auto"
 ) -> tuple[DSExperiment | None, int | None, dict | None, str | None]:
@@ -253,11 +255,14 @@ def get_sota_exp_stat(
     return sota_exp, sota_loop_id, sota_mle_score, map_stat(sota_mle_score)


+if UI_SETTING.enable_cache:
+    get_sota_exp_stat = cache_with_pickle(_get_sota_exp_stat_hash_func, force=True)(get_sota_exp_stat)
+
+
 def _get_score_stat_hash_func(log_path: Path, sota_loop_id: int) -> str:
     return _log_path_hash_func(log_path) + str(sota_loop_id)


-@cache_with_pickle(_get_score_stat_hash_func, force=True)
 def get_score_stat(log_path: Path, sota_loop_id: int) -> tuple[float | None, float | None, bool, float | None]:
     """
     Get the scores before and after merge period.
@@ -351,7 +356,10 @@ def get_score_stat(log_path: Path, sota_loop_id: int) -> tuple[float | None, flo
     return valid_improve, test_improve, submit_is_merge, merge_sota_rate


-@cache_with_pickle(_log_path_hash_func, force=True)
+if UI_SETTING.enable_cache:
+    get_score_stat = cache_with_pickle(_get_score_stat_hash_func, force=True)(get_score_stat)
+
+
 def load_times_deprecated(log_path: Path):
     try:
         session_path = log_path / "__session__"
@@ -365,7 +373,10 @@ def load_times_deprecated(log_path: Path):
     return rd_times


-@cache_with_pickle(_log_path_hash_func, force=True)
+if UI_SETTING.enable_cache:
+    load_times_deprecated = cache_with_pickle(_log_path_hash_func, force=True)(load_times_deprecated)
+
+
 def load_times_info(log_path: Path) -> dict[int, dict[str, dict[Literal["start_time", "end_time"], datetime]]]:
     """
     Load timing information for each loop and step.
@@ -403,6 +414,10 @@ def load_times_info(log_path: Path) -> dict[int, dict[str, dict[Literal["start_t
     return times_info


+if UI_SETTING.enable_cache:
+    load_times_info = cache_with_pickle(_log_path_hash_func, force=True)(load_times_info)
+
+
 def _log_folders_summary_hash_func(log_folder: str | Path, hours: int | None = None):
     summary_p = Path(log_folder) / (f"summary.pkl" if hours is None else f"summary_{hours}h.pkl")
     if summary_p.exists():
@@ -412,7 +427,6 @@ def _log_folders_summary_hash_func(log_folder: str | Path, hours: int | None = N
     return md5_hash(hash_str)


-@cache_with_pickle(_log_folders_summary_hash_func, force=True)
 def get_summary_df(log_folder: str | Path, hours: int | None = None) -> tuple[dict, pd.DataFrame]:
     """Process experiment logs and generate summary DataFrame.

@@ -645,6 +659,10 @@ def compare_score(s1, s2):
     return summary, base_df


+if UI_SETTING.enable_cache:
+    get_summary_df = cache_with_pickle(_log_folders_summary_hash_func, force=True)(get_summary_df)
+
+
 def percent_df(summary_df: pd.DataFrame, show_origin=True) -> pd.DataFrame:
     """
     Convert the summary DataFrame to a percentage format.

0 comments