2 files changed: +7 −3 lines

sql/core/src/main/scala/org/apache/spark/sql/parquet

@@ -126,9 +126,13 @@ private[sql] case class ParquetTableScan(
       conf)

     if (requestedPartitionOrdinals.nonEmpty) {
-      // This check if based on CatalystConverter.createRootConverter.
+      // This check is based on CatalystConverter.createRootConverter.
       val primitiveRow = output.forall(a => ParquetTypesConverter.isPrimitiveType(a.dataType))

+      // Uses temporary variable to avoid the whole `ParquetTableScan` object being captured into
+      // the `mapPartitionsWithInputSplit` closure below.
+      val outputSize = output.size
+
       baseRDD.mapPartitionsWithInputSplit { case (split, iter) =>
         val partValue = "([^=]+)=([^=]+)".r
         val partValues =
@@ -165,7 +169,7 @@ private[sql] case class ParquetTableScan(
         }
       } else {
         // Create a mutable row since we need to fill in values from partition columns.
-        val mutableRow = new GenericMutableRow(output.size)
+        val mutableRow = new GenericMutableRow(outputSize)
         new Iterator[Row] {
           def hasNext = iter.hasNext
           def next() = {
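For context on why the patch hoists `output.size` into a local: in Scala, reading a field inside a lambda desugars to `this.field`, so the closure passed to `mapPartitionsWithInputSplit` would capture the entire `ParquetTableScan` instance, and Spark would then have to serialize that whole object with every task. Copying the value into a local `val` first means only that value is captured. Below is a minimal sketch of the pattern, with hypothetical class and method names:

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Hypothetical stand-in for an operator like ParquetTableScan. The class
// itself is not Serializable, just like many Spark plan nodes.
class Scan(sc: SparkContext, columns: Seq[String]) {

  // Problem: `columns.size` is really `this.columns.size`, so the closure
  // captures the whole `Scan` instance and the job fails with
  // "Task not serializable".
  def widthsCaptured() = sc.parallelize(1 to 3).map(_ => columns.size)

  // Fix: copy the value into a local val first; the closure now captures
  // only an Int. This mirrors the `outputSize` temporary in the patch.
  def widthsLocal() = {
    val size = columns.size
    sc.parallelize(1 to 3).map(_ => size)
  }
}

object ClosureCaptureDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("demo"))
    val scan = new Scan(sc, Seq("a", "b"))
    println(scan.widthsLocal().collect().mkString(","))  // prints: 2,2,2
    // scan.widthsCaptured().collect()  // would throw org.apache.spark.SparkException
    sc.stop()
  }
}
```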
@@ -476,7 +476,7 @@ private[sql] case class ParquetRelation2(
       // When the data does not include the key and the key is requested then we must fill it in
       // based on information from the input split.
       if (!partitionKeysIncludedInDataSchema && partitionKeyLocations.nonEmpty) {
-        // This check if based on CatalystConverter.createRootConverter.
+        // This check is based on CatalystConverter.createRootConverter.
         val primitiveRow =
           requestedSchema.forall(a => ParquetTypesConverter.isPrimitiveType(a.dataType))