...
```
# see HIVE-9153
mapreduce.input.fileinputformat.split.maxsize=750000000
hive.vectorized.execution.enabled=true
hive.cbo.enable=true
hive.optimize.reducededuplication.min.reducer=4
hive.optimize.reducededuplication=true
hive.orc.splits.include.file.footer=false
hive.merge.mapfiles=true
hive.merge.sparkfiles=false
hive.merge.smallfiles.avgsize=16000000
hive.merge.size.per.task=256000000
hive.merge.orcfile.stripe.level=true
hive.auto.convert.join=true
hive.auto.convert.join.noconditionaltask=true
hive.auto.convert.join.noconditionaltask.size=894435328
hive.optimize.bucketmapjoin.sortedmerge=false
hive.map.aggr.hash.percentmemory=0.5
hive.map.aggr=true
hive.optimize.sort.dynamic.partition=false
hive.stats.autogather=true
hive.stats.fetch.column.stats=true
hive.vectorized.execution.reduce.enabled=false
hive.vectorized.groupby.checkinterval=4096
hive.vectorized.groupby.flush.percent=0.1
hive.compute.query.using.stats=true
hive.limit.pushdown.memory.usage=0.4
hive.optimize.index.filter=true
hive.exec.reducers.bytes.per.reducer=67108864
hive.smbjoin.cache.rows=10000
hive.exec.orc.default.stripe.size=67108864
hive.fetch.task.conversion=more
hive.fetch.task.conversion.threshold=1073741824
hive.fetch.task.aggr=false
mapreduce.input.fileinputformat.list-status.num-threads=5
spark.kryo.referenceTracking=false
spark.kryo.classesToRegister=org.apache.hadoop.hive.ql.io.HiveKey,org.apache.hadoop.io.BytesWritable,org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch
```