33import subprocess
44import time
55from pathlib import Path
6+ from typing import List , Tuple
67
78from ci .jobs .scripts .integration_tests_configs import IMAGES_ENV , get_optimal_test_batch
89from ci .praktika .info import Info
@@ -80,6 +81,55 @@ def parse_args():
8081FLAKY_CHECK_MODULE_REPEAT_COUNT = 2
8182
8283
def tests_to_run(
    batch_num: int, total_batches: int, args_test: List[str]
) -> Tuple[List[str], List[str]]:
    """Select the integration-test modules to run in this batch.

    Discovers all test modules under ./tests/integration/, splits them into
    parallel and sequential groups via get_optimal_test_batch(), and — when an
    explicit test selection is given — narrows the result to the requested
    tests.

    :param batch_num: 1-based index of the current batch; ignored when
        args_test is set, since an explicit selection is never sharded.
    :param total_batches: total number of batches the run is split into.
    :param args_test: optional explicit test selection (accepted forms are
        documented below); empty means "run the whole batch".
    :return: tuple (parallel_tests, sequential_tests) of pytest arguments.
    :raises AssertionError: if discovery finds suspiciously few test files, or
        a requested test matches nothing.
    """
    if args_test:
        # An explicit selection is never sharded across batches.
        batch_num = 1
        total_batches = 1

    test_files = [
        str(p.relative_to("./tests/integration/"))
        for p in Path("./tests/integration/").glob("test_*/test*.py")
    ]

    # Sanity check that discovery actually found the suite (it normally
    # contains hundreds of modules).
    assert len(test_files) > 100

    parallel_test_modules, sequential_test_modules = get_optimal_test_batch(
        test_files, total_batches, batch_num, MAX_WORKERS
    )
    if not args_test:
        return parallel_test_modules, sequential_test_modules

    # Accepted forms for an entry of args_test:
    #   1) test suite  (e.g. test_directory or test_directory/)
    #   2) test module (e.g. test_directory/test_module or test_directory/test_module.py)
    #   3) test case   (e.g. test_directory/test_module.py::test_case
    #                   or test_directory/test_module::test_case[test_param])
    def test_match(test_file: str, test_arg: str) -> bool:
        if "/" not in test_arg:
            # Bare suite name: match every module inside that directory.
            # Prefix match (not substring) so e.g. "storage" does not match
            # "test_storage/...".
            return test_file.startswith(f"{test_arg}/")
        if test_arg.endswith("/"):
            # Suite name with a trailing slash (form 1); previously this fell
            # through to the module comparison and could never match.
            return test_file.startswith(test_arg)
        if test_arg.endswith(".py"):
            return test_file == test_arg
        # Drop an optional "::test_case[...]" suffix, then compare module
        # paths with the ".py" extension normalized away.
        test_arg = test_arg.split("::", maxsplit=1)[0]
        return test_file.removesuffix(".py") == test_arg.removesuffix(".py")

    parallel_tests = []
    sequential_tests = []
    for test_arg in args_test:
        # Append each requested test at most once per group: a suite name can
        # match many modules, and a per-module append would duplicate it in
        # the pytest command line.
        in_parallel = any(
            test_match(test_file, test_arg) for test_file in parallel_test_modules
        )
        in_sequential = any(
            test_match(test_file, test_arg) for test_file in sequential_test_modules
        )
        if in_parallel:
            parallel_tests.append(test_arg)
        if in_sequential:
            sequential_tests.append(test_arg)
        assert in_parallel or in_sequential, f"Test [{test_arg}] not found"

    return parallel_tests, sequential_tests
131+
132+
83133def main ():
84134 sw = Utils .Stopwatch ()
85135 info = Info ()
@@ -92,7 +142,6 @@ def main():
92142 is_bugfix_validation = False
93143 is_parallel = False
94144 is_sequential = False
95- workers = MAX_WORKERS
96145 java_path = Shell .get_output (
97146 "update-alternatives --config java | sed -n 's/.*(providing \/usr\/bin\/java): //p'" ,
98147 verbose = True ,
@@ -185,28 +234,8 @@ def main():
185234 _start_docker_in_docker ()
186235 Shell .check ("docker info > /dev/null" , verbose = True , strict = True )
187236
188- test_files = []
189- for dir_name in os .listdir ("./tests/integration/" ):
190- if dir_name .startswith ("test_" ):
191- test_files .extend (
192- [
193- os .path .join (dir_name , file_name )
194- for file_name in os .listdir (
195- os .path .join ("./tests/integration/" , dir_name )
196- )
197- if file_name .endswith (".py" ) and file_name .startswith ("test" )
198- ]
199- )
200- test_files = [
201- f"{ dir_name } /{ file_name } " .replace ("./tests/integration/" , "" )
202- for dir_name , file_name in [
203- test_file .rsplit ("/" , 1 ) for test_file in test_files
204- ]
205- ]
206- assert len (test_files ) > 100
207-
208- parallel_test_modules , sequential_test_modules = get_optimal_test_batch (
209- test_files , total_batches , batch_num , workers
237+ parallel_test_modules , sequential_test_modules = tests_to_run (
238+ batch_num , total_batches , args .test
210239 )
211240
212241 if is_bugfix_validation or is_flaky_check :
@@ -249,75 +278,60 @@ def main():
249278 "JAVA_PATH" : java_path ,
250279 }
251280 test_results = []
252- files = []
281+ failed_tests_files = []
253282
254283 has_error = False
255284 error_info = []
256285
257- if args .test :
258- test_result_specific = Result .from_pytest_run (
259- command = f"{ ' ' .join (args .test )} { '--pdb' if args .debug else '' } { repeat_option } " ,
260- cwd = "./tests/integration/" ,
261- env = test_env ,
262- pytest_report_file = f"{ temp_path } /pytest.jsonl" ,
263- )
264- test_results .extend (test_result_specific .results )
265- if test_result_specific .files :
266- files .extend (test_result_specific .files )
267- else :
268- module_repeat_cnt = 1 if not is_flaky_check else FLAKY_CHECK_MODULE_REPEAT_COUNT
269- if parallel_test_modules :
270- for attempt in range (module_repeat_cnt ):
271- test_result_parallel = Result .from_pytest_run (
272- command = f"{ ' ' .join (reversed (parallel_test_modules ))} --report-log-exclude-logs-on-passed-tests -n { workers } --dist=loadfile --tb=short { repeat_option } " ,
273- cwd = "./tests/integration/" ,
274- env = test_env ,
275- pytest_report_file = f"{ temp_path } /pytest_parallel.jsonl" ,
286+ module_repeat_cnt = 1 if not is_flaky_check else FLAKY_CHECK_MODULE_REPEAT_COUNT
287+ if parallel_test_modules :
288+ for attempt in range (module_repeat_cnt ):
289+ test_result_parallel = Result .from_pytest_run (
290+ command = f"{ ' ' .join (reversed (parallel_test_modules ))} --report-log-exclude-logs-on-passed-tests -n { MAX_WORKERS } --dist=loadfile --tb=short { repeat_option } " ,
291+ cwd = "./tests/integration/" ,
292+ env = test_env ,
293+ pytest_report_file = f"{ temp_path } /pytest_parallel.jsonl" ,
294+ )
295+ if is_flaky_check and not test_result_parallel .is_ok ():
296+ print (
297+ f"Flaky check: Test run fails after attempt [{ attempt + 1 } /{ module_repeat_cnt } ] - break"
276298 )
277- if not test_result_parallel .is_ok ():
278- print (
279- f"Flaky check: Test run fails after attempt [{ attempt + 1 } /{ module_repeat_cnt } ] - break"
280- )
281- break
282- test_results .extend (test_result_parallel .results )
283- if test_result_parallel .files :
284- files .extend (test_result_parallel .files )
285- if test_result_parallel .is_error ():
286- has_error = True
287- error_info .append (test_result_parallel .info )
288-
289- fail_num = len ([r for r in test_results if not r .is_ok ()])
290- if (
291- sequential_test_modules
292- and fail_num < MAX_FAILS_BEFORE_DROP
293- and not has_error
294- ):
295- for attempt in range (module_repeat_cnt ):
296- test_result_sequential = Result .from_pytest_run (
297- command = f"{ ' ' .join (sequential_test_modules )} --report-log-exclude-logs-on-passed-tests --tb=short { repeat_option } -n 1 --dist=loadfile" ,
298- env = test_env ,
299- cwd = "./tests/integration/" ,
300- pytest_report_file = f"{ temp_path } /pytest_sequential.jsonl" ,
299+ break
300+ test_results .extend (test_result_parallel .results )
301+ if test_result_parallel .files :
302+ failed_tests_files .extend (test_result_parallel .files )
303+ if test_result_parallel .is_error ():
304+ has_error = True
305+ error_info .append (test_result_parallel .info )
306+
307+ fail_num = len ([r for r in test_results if not r .is_ok ()])
308+ if sequential_test_modules and fail_num < MAX_FAILS_BEFORE_DROP and not has_error :
309+ for attempt in range (module_repeat_cnt ):
310+ test_result_sequential = Result .from_pytest_run (
311+ command = f"{ ' ' .join (sequential_test_modules )} --report-log-exclude-logs-on-passed-tests --tb=short { repeat_option } -n 1 --dist=loadfile" ,
312+ env = test_env ,
313+ cwd = "./tests/integration/" ,
314+ pytest_report_file = f"{ temp_path } /pytest_sequential.jsonl" ,
315+ )
316+ if is_flaky_check and not test_result_sequential .is_ok ():
317+ print (
318+ f"Flaky check: Test run fails after attempt [{ attempt + 1 } /{ module_repeat_cnt } ] - break"
301319 )
302- if not test_result_sequential .is_ok ():
303- print (
304- f"Flaky check: Test run fails after attempt [{ attempt + 1 } /{ module_repeat_cnt } ] - break"
305- )
306- break
307- test_results .extend (test_result_sequential .results )
308- if test_result_sequential .files :
309- files .extend (test_result_sequential .files )
310- if test_result_sequential .is_error ():
311- has_error = True
312- error_info .append (test_result_sequential .info )
320+ break
321+ test_results .extend (test_result_sequential .results )
322+ if test_result_sequential .files :
323+ failed_tests_files .extend (test_result_sequential .files )
324+ if test_result_sequential .is_error ():
325+ has_error = True
326+ error_info .append (test_result_sequential .info )
313327
314328 # Remove iptables rule added in tests
315329 Shell .check ("sudo iptables -D DOCKER-USER 1 ||:" , verbose = True )
316330
317331 if not info .is_local_run :
318332 print ("Dumping dmesg" )
319333 Shell .check ("dmesg -T > dmesg.log" , verbose = True , strict = True )
320- files .append ("dmesg.log" )
334+ failed_tests_files .append ("dmesg.log" )
321335 with open ("dmesg.log" , "rb" ) as dmesg :
322336 dmesg = dmesg .read ()
323337 if (
@@ -331,16 +345,27 @@ def main():
331345 )
332346 )
333347
348+ files = []
334349 if not info .is_local_run :
335350 failed_suits = []
351+ # Collect docker compose configs used in tests
352+ config_files = [
353+ str (p )
354+ for p in Path ("./tests/integration/" ).glob ("test_*/_instances*/*/configs/" )
355+ ]
336356 for test_result in test_results :
337357 if not test_result .is_ok () and ".py" in test_result .name :
338358 failed_suits .append (test_result .name .split ("/" )[0 ])
339359 failed_suits = list (set (failed_suits ))
340360 for failed_suit in failed_suits :
341- files .append (f"tests/integration/{ failed_suit } " )
361+ failed_tests_files .append (f"tests/integration/{ failed_suit } " )
342362
343- files = [Utils .compress_files_gz (files , f"{ temp_path } /logs.tar.gz" )]
363+ files .append (
364+ Utils .compress_files_gz (failed_tests_files , f"{ temp_path } /logs.tar.gz" )
365+ )
366+ files .append (
367+ Utils .compress_files_gz (config_files , f"{ temp_path } /configs.tar.gz" )
368+ )
344369
345370 R = Result .create_from (results = test_results , stopwatch = sw , files = files )
346371
0 commit comments