@@ -41,10 +41,10 @@ class RecordingManagedBuffer(underlyingBuffer: NioManagedBuffer) extends ManagedBuffer {
   var callsToRetain = 0
   var callsToRelease = 0
 
-  override def size() = underlyingBuffer.size()
-  override def nioByteBuffer() = underlyingBuffer.nioByteBuffer()
-  override def createInputStream() = underlyingBuffer.createInputStream()
-  override def convertToNetty() = underlyingBuffer.convertToNetty()
+  override def size(): Long = underlyingBuffer.size()
+  override def nioByteBuffer(): ByteBuffer = underlyingBuffer.nioByteBuffer()
+  override def createInputStream(): InputStream = underlyingBuffer.createInputStream()
+  override def convertToNetty(): AnyRef = underlyingBuffer.convertToNetty()
 
   override def retain(): ManagedBuffer = {
     callsToRetain += 1
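For context, a minimal sketch (hypothetical names, not from this file) of what the explicit result types in the hunk above guard against: on an unannotated override, scalac infers the result type from the method body, which may be narrower than the interface's and silently change the compiled signature.

```scala
trait NettyConvertible {
  def convertToNetty(): AnyRef
}

class Impl extends NettyConvertible {
  // Legal covariant override, but the inferred (and compiled) result type
  // is String, not the interface's AnyRef. Annotating ": AnyRef" pins the
  // signature regardless of what the body happens to return.
  override def convertToNetty() = "payload"
}
```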
@@ -81,7 +81,7 @@ class HashShuffleReaderSuite extends SparkFunSuite with LocalSparkContext {
     // Create a return function to use for the mocked wrapForCompression method that just returns
     // the original input stream.
     val dummyCompressionFunction = new Answer[InputStream] {
-      override def answer(invocation: InvocationOnMock) =
+      override def answer(invocation: InvocationOnMock): InputStream =
         invocation.getArguments()(1).asInstanceOf[InputStream]
     }
 
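A sketch of how such an Answer is typically wired to a mock with Mockito's `thenAnswer`; the `blockManager` mock and the two-argument `wrapForCompression(blockId, stream)` shape are inferred from the comment above, so `getArguments()(1)` picks out the stream argument:

```scala
import java.io.InputStream
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer

// Pass-through stub: hand back the second mock argument (the raw stream)
// unchanged, so the test reads uncompressed bytes.
val passThrough = new Answer[InputStream] {
  override def answer(invocation: InvocationOnMock): InputStream =
    invocation.getArguments()(1).asInstanceOf[InputStream]
}

// Assumed wiring against the test's mocked block manager:
// when(blockManager.wrapForCompression(any[BlockId](), any[InputStream]()))
//   .thenAnswer(passThrough)
```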
@@ -118,7 +118,7 @@ class HashShuffleReaderSuite extends SparkFunSuite with LocalSparkContext {
     // Test a scenario where all data is local, just to avoid creating a bunch of additional mocks
     // for the code to read data over the network.
     val statuses: Array[(BlockManagerId, Long)] =
-      Array.fill(numMaps)((localBlockManagerId, byteOutputStream.size()))
+      Array.fill(numMaps)((localBlockManagerId, byteOutputStream.size().toLong))
     when(mapOutputTracker.getServerStatuses(shuffleId, reduceId)).thenReturn(statuses)
 
     // Create a mocked shuffle handle to pass into HashShuffleReader.
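The `.toLong` above addresses a real type mismatch: `java.io.ByteArrayOutputStream.size()` returns `Int`, while `statuses` is declared `Array[(BlockManagerId, Long)]`. The unconverted form also compiles via scalac's implicit numeric widening, but the explicit conversion keeps the widening visible at the call site, which lint options such as `-Ywarn-numeric-widen` favor. A self-contained sketch:

```scala
import java.io.ByteArrayOutputStream

val out = new ByteArrayOutputStream()
out.write(42)
val viaWidening: Long = out.size()      // compiles via implicit Int -> Long widening
val explicit: Long = out.size().toLong  // same value, conversion visible at the call site
```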