@@ -17,12 +17,15 @@
 
 package org.apache.spark.sql.execution.benchmark
 
+import java.util.UUID
+
 import scala.util.Random
 
 import org.apache.hadoop.conf.Configuration
 
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow}
+import org.apache.spark.sql.execution.streaming.StreamExecution
 import org.apache.spark.sql.execution.streaming.state.{HDFSBackedStateStoreProvider, NoPrefixKeyStateEncoderSpec, RocksDBStateStoreProvider, StateStore, StateStoreConf, StateStoreId, StateStoreProvider}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.{IntegerType, StructField, StructType, TimestampType}
@@ -477,11 +480,16 @@ object StateStoreBasicOperationsBenchmark extends SqlBasedBenchmark {
     val sqlConf = new SQLConf()
     sqlConf.setConfString("spark.sql.streaming.stateStore.rocksdb.trackTotalNumberOfRows",
       trackTotalNumberOfRows.toString)
+    sqlConf.setConfString("spark.sql.streaming.stateStore.coordinatorReportSnapshotUploadLag",
+      false.toString)
     val storeConf = new StateStoreConf(sqlConf)
 
+    val configuration = new Configuration
+    configuration.set(StreamExecution.RUN_ID_KEY, UUID.randomUUID().toString)
+
     provider.init(
       storeId, keySchema, valueSchema, NoPrefixKeyStateEncoderSpec(keySchema),
-      useColumnFamilies = useColumnFamilies, storeConf, new Configuration,
+      useColumnFamilies = useColumnFamilies, storeConf, configuration,
       useMultipleValuesPerKey = useMultipleValuesPerKey)
     provider
   }
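Context for reviewers: the two additions appear to go together. The benchmark drives state store providers directly, outside a running streaming query, so there is no StreamExecution to place a run id in the Hadoop configuration and no live coordinator to receive snapshot-upload-lag reports. A minimal standalone sketch of the same setup, using only classes the diff already imports (the config key and RUN_ID_KEY come from the diff; the surrounding values are illustrative, not the benchmark's exact code):

    import java.util.UUID
    import org.apache.hadoop.conf.Configuration
    import org.apache.spark.sql.execution.streaming.StreamExecution
    import org.apache.spark.sql.internal.SQLConf

    // Turn off snapshot-upload-lag reporting, which presumes a coordinator
    // that a standalone benchmark run does not have.
    val sqlConf = new SQLConf()
    sqlConf.setConfString(
      "spark.sql.streaming.stateStore.coordinatorReportSnapshotUploadLag", "false")

    // Hand the provider a synthetic run id through the Hadoop configuration,
    // where a real streaming query would normally supply its own.
    val hadoopConf = new Configuration
    hadoopConf.set(StreamExecution.RUN_ID_KEY, UUID.randomUUID().toString)

Passing this pre-populated `hadoopConf` to `provider.init` (instead of a fresh `new Configuration`, as the old code did) is what the hunk at line 480 accomplishes.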