Commit 9344635

feat: add improve_mode to MultiProcessEvolvingStrategy for selective task implementation (#1273)
1 parent 40876e2 · commit 9344635

2 files changed (+4, -7 lines)

rdagent/components/coder/CoSTEER/evolving_strategy.py

Lines changed: 3 additions & 1 deletion
@@ -21,9 +21,10 @@
 class MultiProcessEvolvingStrategy(EvolvingStrategy):
     KEY_CHANGE_SUMMARY = "__change_summary__"  # Optional key for the summary of the change of evolving subjects

-    def __init__(self, scen: Scenario, settings: CoSTEERSettings):
+    def __init__(self, scen: Scenario, settings: CoSTEERSettings, improve_mode: bool = False):
         super().__init__(scen)
         self.settings = settings
+        self.improve_mode = improve_mode  # In improve mode, only tasks that have failed before are implemented; the main difference is that the first loop does not implement all tasks.

     @abstractmethod
     def implement_one_task(
@@ -93,6 +94,7 @@ def evolve(
             elif (
                 target_task_desc not in queried_knowledge.success_task_to_knowledge_dict
                 and target_task_desc not in queried_knowledge.failed_task_info_set
+                and not (self.improve_mode and last_feedback[index] is None)
             ):
                 to_be_finished_task_index.append(index)
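
Taken together, the two hunks gate task scheduling in evolve(): with improve_mode enabled, a task whose last feedback is None (i.e. it has never been attempted) is not scheduled for implementation, so the first loop goes straight to the evaluators. A minimal, self-contained sketch of that selection logic; the QueriedKnowledge stand-in and plain-string task descriptions are assumptions, the real types live in rdagent's CoSTEER components:

from dataclasses import dataclass, field

@dataclass
class QueriedKnowledge:
    # Stand-in for the real CoSTEER knowledge object; only the two
    # membership checks used by the selection logic are modeled.
    success_task_to_knowledge_dict: dict = field(default_factory=dict)
    failed_task_info_set: set = field(default_factory=set)

def select_tasks(task_descs, qk, last_feedback, improve_mode=False):
    to_be_finished_task_index = []
    for index, desc in enumerate(task_descs):
        if desc in qk.success_task_to_knowledge_dict:
            continue  # already solved: reuse the cached knowledge
        if desc in qk.failed_task_info_set:
            continue  # recorded as a known failure: do not retry
        if improve_mode and last_feedback[index] is None:
            continue  # improve mode: never attempted, let the evaluators run first
        to_be_finished_task_index.append(index)
    return to_be_finished_task_index

# First loop in improve mode: no task has feedback yet, so none is implemented.
print(select_tasks(["a", "b"], QueriedKnowledge(), [None, None], improve_mode=True))   # []
# Later loop: only the task that received feedback is re-implemented.
print(select_tasks(["a", "b"], QueriedKnowledge(), [None, "failed"], improve_mode=True))  # [1]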

rdagent/scenarios/data_science/dev/runner/__init__.py

Lines changed: 1 addition & 6 deletions
@@ -48,11 +48,6 @@ def implement_one_task(
         workspace: FBWorkspace | None = None,
         prev_task_feedback: CoSTEERSingleFeedback | None = None,
     ) -> dict[str, str]:
-
-        if prev_task_feedback is None:
-            # if no prev_task_feedback, it is the first loop; we do not make any changes and goto evaluators directly.
-            return {}
-
         # Get evolving history
         task_info = target_task.get_task_information()
         queried_former_failed_knowledge = (
@@ -157,7 +152,7 @@ def __init__(
             single_evaluator=eval_l, scen=scen
         )  # Please specify whether you agree running your eva in parallel or not
         settings = DSRunnerCoSTEERSettings()
-        es = DSRunnerMultiProcessEvolvingStrategy(scen=scen, settings=settings)
+        es = DSRunnerMultiProcessEvolvingStrategy(scen=scen, settings=settings, improve_mode=True)

         # In runner, we don't need very big loops, so we set max_loop to runner_max_loop
         super().__init__(
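
The runner change is the other half of the same move: the first-loop early return in implement_one_task is deleted, and the equivalent behavior now comes from constructing the strategy with improve_mode=True, so the skip happens upstream in evolve() instead of inside each task implementation. A hedged before/after contrast; the stand-in functions below are illustrative assumptions, not the real signatures:

def implement_one_task_old(target_task: str, prev_task_feedback: str | None = None) -> dict[str, str]:
    # Before this commit: the skip lived inside implement_one_task itself.
    if prev_task_feedback is None:
        return {}  # first loop: change nothing, go straight to the evaluators
    return {"main.py": f"# revised code addressing: {prev_task_feedback}"}

def scheduled_indices(last_feedback: list[str | None], improve_mode: bool = True) -> list[int]:
    # After this commit: with improve_mode=True the strategy's evolve() never
    # schedules a task whose last feedback is None, so the early return above
    # is no longer needed (and the unimplemented tasks are never dispatched).
    return [i for i, fb in enumerate(last_feedback) if not (improve_mode and fb is None)]

print(implement_one_task_old("t1"))               # {} -> evaluate the unmodified workspace
print(scheduled_indices([None, "tests failed"]))  # [1] -> only the failed task is reworked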
