@@ -17,10 +17,8 @@
 
 package org.apache.spark.sql.execution
 
-import java.util.concurrent.ExecutionException
-
 import org.apache.spark.sql.Row
-import org.apache.spark.sql.catalyst.expressions.codegen.{CodeAndComment, CodegenContext, CodeGenerator}
+import org.apache.spark.sql.catalyst.expressions.codegen.{CodeAndComment, CodeGenerator}
 import org.apache.spark.sql.execution.aggregate.HashAggregateExec
 import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec
 import org.apache.spark.sql.execution.joins.SortMergeJoinExec
@@ -151,7 +149,7 @@ class WholeStageCodegenSuite extends SparkPlanTest with SharedSQLContext {
     }
   }
 
-  def genGroupByCodeGenContext(caseNum: Int): CodeAndComment = {
+  def genGroupByCode(caseNum: Int): CodeAndComment = {
     val caseExp = (1 to caseNum).map { i =>
       s"case when id > $i and id <= ${i + 1} then 1 else 0 end as v$i"
     }.toList
@@ -180,10 +178,10 @@ class WholeStageCodegenSuite extends SparkPlanTest with SharedSQLContext {
   }
 
   test("SPARK-21871 check if we can get large code size when compiling too long functions") {
-    val codeWithShortFunctions = genGroupByCodeGenContext(3)
+    val codeWithShortFunctions = genGroupByCode(3)
     val (_, maxCodeSize1) = CodeGenerator.compile(codeWithShortFunctions)
     assert(maxCodeSize1 < SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.defaultValue.get)
-    val codeWithLongFunctions = genGroupByCodeGenContext(20)
+    val codeWithLongFunctions = genGroupByCode(20)
     val (_, maxCodeSize2) = CodeGenerator.compile(codeWithLongFunctions)
     assert(maxCodeSize2 > SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.defaultValue.get)
   }
|