{
  "configs": {
    "spark.gluten.sql.columnar.backend.ch.runtime_config.path": "./",
    "spark.hadoop.input.write.timeout": "180000",
    "spark.hadoop.fs.s3a.connection.ssl.enabled": "false",
    "spark.hadoop.dfs.client.log.severity": "INFO",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.use_local_format": "true",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.dump_pipeline": "true",
    "spark.hadoop.fs.s3a.access.key": "",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.input_read_timeout": "180000",
    "spark.gluten.sql.columnar.backend.velox.IOThreads": "0",
    "spark.gluten.sql.columnar.backend.ch.worker.id": "1",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs_cache.type": "cache",
    "spark.hadoop.fs.s3a.use.instance.credentials": "false",
    "spark.gluten.memory.offHeap.size.in.bytes": "10737418240",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.logger.level": "test",
    "spark.gluten.sql.columnar.shuffle.codec": "",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs.type": "hdfs_gluten",
    "spark.hadoop.fs.s3a.path.style.access": "true",
    "spark.hadoop.fs.s3a.iam.role.session.name": "",
    "spark.hadoop.input.read.timeout": "180000",
    "spark.hadoop.fs.s3a.secret.key": "",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.dfs_client_log_severity": "INFO",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs_cache.disk": "hdfs",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.policies.__hdfs_main.volumes": "main",
    "spark.gluten.sql.columnar.backend.ch.runtime_settings.max_bytes_before_external_sort": "5368709120",
    "spark.gluten.sql.columnar.backend.ch.runtime_settings.output_format_orc_compression_method": "snappy",
    "spark.sql.orc.compression.codec": "snappy",
    "spark.hadoop.fs.s3a.iam.role": "",
    "spark.hadoop.input.connect.timeout": "180000",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.input_connect_timeout": "180000",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.dfs_client_read_shortcircuit": "false",
    "spark.hadoop.fs.s3a.endpoint": "localhost:9000",
    "spark.memory.offHeap.enabled": "true",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs_cache.path": "/tmp/hdfs_cache/3.5/",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.dfs_default_replica": "1",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.timezone": "Asia/Shanghai",
    "spark.gluten.sql.columnar.backend.ch.runtime_settings.max_bytes_before_external_group_by": "5368709120",
    "spark.gluten.sql.columnar.backend.velox.SplitPreloadPerDriver": "2",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs.endpoint": "hdfs://127.0.0.1:8020/",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs_cache.max_size": "10Gi",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs.metadata_path": "/tmp/metadata/hdfs/3.5/",
    "spark.gluten.sql.columnar.shuffle.codecBackend": "",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.input_write_timeout": "180000",
    "spark.gluten.memory.task.offHeap.size.in.bytes": "10737418240",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.policies.__hdfs_main.volumes.main.disk": "hdfs_cache",
    "spark.gluten.sql.columnar.backend.ch.runtime_config.local_engine.settings.log_processors_profiles": "true"
  }
}