Andres
11/29/2023, 5:14 PM
{
  "metricFieldSpecs": [],
  "primaryKeyColumns": [
    "device_id"
  ],
  "dimensionFieldSpecs": [
    {
      "name": "country",
      "dataType": "STRING"
    },
    {
      "name": "device_id",
      "dataType": "STRING"
    },
    {
      "name": "device_type",
      "dataType": "STRING"
    },
    {
      "name": "all_segments",
      "dataType": "INT",
      "singleValueField": false
    }
  ],
  "dateTimeFieldSpecs": [],
  "schemaName": "devices"
}
And my table:
{
  "tableName": "devices",
  "tableType": "OFFLINE",
  "tenants": {},
  "segmentsConfig": {
    "replication": "1"
  },
  "tableIndexConfig": {
    "loadMode": "MMAP",
    "createInvertedIndexDuringSegmentGeneration": true,
    "segmentPartitionConfig": {
      "columnPartitionMap": {
        "country": {
          "functionName": "Murmur",
          "numPartitions": 20
        }
      }
    }
  },
  "routing": {
    "segmentPrunerTypes": [
      "partition"
    ]
  },
  "fieldConfigList": [
    {
      "name": "all_segments",
      "indexes": {
        "forward": {
          "disabled": true
        },
        "inverted": {}
      }
    }
  ],
  "metadata": {
    "customConfigs": {}
  }
}
But the table is not added when I check the controller UI, and I'm not receiving any errors:
bin/pinot-admin.sh AddTable -schemaFile schema.json -tableConfigFile table.json -controllerHost localhost -controllerPort 9000 -exec
WARNING: sun.reflect.Reflection.getCallerClass is not supported. This will impact performance.
ERROR StatusLogger Reconfiguration failed: No configuration found for 'Default' at 'null' in 'null'
WARNING: An illegal reflective access operation has occurred
WARNING: Illegal reflective access by org.codehaus.groovy.reflection.CachedClass (file:/opt/pinot/lib/pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar) to method java.lang.Object.finalize()
WARNING: Please consider reporting this to the maintainers of org.codehaus.groovy.reflection.CachedClass
WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
WARNING: All illegal access operations will be denied in a future release
I'm using the latest code from the current master branch.
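For what it's worth, one way to confirm whether the schema and table actually reached the controller is its REST API (a sketch using the controller address from the command above):
# list the schemas and tables the controller knows about
curl http://localhost:9000/schemas
curl http://localhost:9000/tables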
Prabhagaran Ks
11/29/2023, 5:40 PM
select * from table where id between 1 and 500000 order by id;
select * from table where id between 500001 and 1000000 order by id;
My understanding is that the results are being sorted again on the broker side, which is causing the latency.
I'm hoping for something like keyset pagination to be done efficiently.
https://sureshdsk.dev/keyset-based-pagination-in-postgres
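For illustration, a sketch of the keyset pattern from that article applied to the two queries above (assuming id is unique; the page size of 500000 is kept from the original queries): each page filters on the last id actually returned by the previous page, so the servers and the broker only sort and merge one page of rows instead of re-sorting the whole range.
select * from table where id > 0 order by id limit 500000;
-- next page: resume after the last id returned above (500000 is a placeholder for that value)
select * from table where id > 500000 order by id limit 500000;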
中村朱理
11/30/2023, 1:20 AM
I'm seeing "read: connection reset by peer" errors.
I think that is because it exceeded the simultaneous connection limit of Pinot.
For example, when connecting from a Go client to MySQL, sql.DB provides SetMaxOpenConns and SetMaxIdleConns, allowing control over the number of simultaneous connections to the database.
Does pinot-client-go provide similar methods?
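Not sure about pinot-client-go itself, but since it queries brokers over HTTP, the equivalent knob in Go's standard library is the transport's connection caps. A minimal sketch, assuming the client can be wired to a caller-supplied http.Client (the function name newLimitedClient and the limits are illustrative, not part of pinot-client-go):

package main

import (
	"net/http"
	"time"
)

// newLimitedClient builds an http.Client whose transport caps the number of
// simultaneous connections per host, similar in spirit to sql.DB's
// SetMaxOpenConns / SetMaxIdleConns.
func newLimitedClient() *http.Client {
	return &http.Client{
		Timeout: 10 * time.Second,
		Transport: &http.Transport{
			MaxConnsPerHost:     10, // hard cap on concurrent connections per broker host
			MaxIdleConnsPerHost: 5,  // idle connections kept open for reuse
		},
	}
}

func main() {
	client := newLimitedClient()
	_ = client // wire this into the Pinot client if it accepts a custom http.Client
}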
Kai
11/30/2023, 5:15 AM
Lakshmanan Velusamy
11/30/2023, 5:20 AM
Glerin
11/30/2023, 7:10 AM
java.lang.NullPointerException: trust store password required
Here is the controller configuration
# Pinot Role
pinot.service.role=CONTROLLER
# Pinot Cluster name
pinot.cluster.name=pinot-quickstart
# Pinot Zookeeper Server
pinot.zk.server=localhost:2181
# Use hostname as Pinot Instance ID other than IP
pinot.set.instance.id.to.hostname=true
# Pinot Controller Port
controller.port=9000
# Pinot Controller VIP Host
controller.vip.host=localhost
# Pinot Controller VIP Port
#controller.vip.port=9000
# Location to store Pinot Segments pushed from clients
controller.data.dir=/tmp/pinot/data/controller
# New configuration added for TLS setup
controller.tls.truststore.path=/root/pinot/truststore.jks
controller.tls.truststore.password=xxxxx
controller.access.protocols=https
controller.access.protocols.https.port=9443
controller.broker.protocol=https
controller.vip.protocol=https
controller.vip.port=9443
2023/11/29 23:04:11.071 ERROR [StartServiceManagerCommand] [main] Failed to start a Pinot [CONTROLLER] at 25.451 since launch
java.lang.NullPointerException: trust store password required
at org.apache.pinot.shaded.com.google.common.base.Preconditions.checkNotNull(Preconditions.java:787) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.core.util.ListenerConfigUtil.buildSSLEngineConfigurator(ListenerConfigUtil.java:268) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.core.util.ListenerConfigUtil.configureListener(ListenerConfigUtil.java:238) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.core.util.ListenerConfigUtil.lambda$buildHttpServer$8(ListenerConfigUtil.java:222) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at java.util.ArrayList.forEach(ArrayList.java:1541) ~[?:?]
at org.apache.pinot.core.util.ListenerConfigUtil.buildHttpServer(ListenerConfigUtil.java:222) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.controller.api.ControllerAdminApiApplication.start(ControllerAdminApiApplication.java:79) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.controller.BaseControllerStarter.setUpPinotController(BaseControllerStarter.java:492) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.controller.BaseControllerStarter.start(BaseControllerStarter.java:328) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.service.PinotServiceManager.startController(PinotServiceManager.java:118) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.service.PinotServiceManager.startRole(PinotServiceManager.java:87) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.admin.command.StartServiceManagerCommand.lambda$startBootstrapServices$0(StartServiceManagerCommand.java:251) ~[pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.admin.command.StartServiceManagerCommand.startPinotService(StartServiceManagerCommand.java:304) [pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.admin.command.StartServiceManagerCommand.startBootstrapServices(StartServiceManagerCommand.java:250) [pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.admin.command.StartServiceManagerCommand.execute(StartServiceManagerCommand.java:196) [pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.admin.command.StartControllerCommand.execute(StartControllerCommand.java:187) [pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.Command.call(Command.java:33) [pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
at org.apache.pinot.tools.Command.call(Command.java:29) [pinot-all-0.12.1-jar-with-dependencies.jar:0.12.1-6e235a4ec2a16006337da04e118a435b5bb8f6d8]
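One thing worth checking, offered only as a guess: an HTTPS listener also needs a keystore to present its certificate, and no keystore entries appear in the config above. A minimal sketch, assuming the keystore keys mirror the truststore keys shown earlier (path and password are placeholders):
controller.tls.keystore.path=/root/pinot/keystore.jks
controller.tls.keystore.password=xxxxx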
Martin Horn
11/30/2023, 6:22 PM
Zhuangda Z
11/30/2023, 6:26 PM
Andres
11/30/2023, 7:43 PM
executionFrameworkSpec:
  name: 'standalone'
  segmentGenerationJobRunnerClassName: 'org.apache.pinot.plugin.ingestion.batch.standalone.SegmentGenerationJobRunner'
  segmentTarPushJobRunnerClassName: 'org.apache.pinot.plugin.ingestion.batch.standalone.SegmentTarPushJobRunner'
jobType: SegmentCreationAndTarPush
inputDirURI: 's3://sample-data/pinot-test/'
includeFileNamePattern: 'glob:**/*.parquet'
outputDirURI: 's3://sample-data/pinot-segments'
overwriteOutput: true
segmentCreationJobParallelism: 10
pinotFSSpecs:
  - scheme: s3
    className: org.apache.pinot.plugin.filesystem.S3PinotFS
    configs:
      region: us-east-1
recordReaderSpec:
  dataFormat: 'parquet'
  className: 'org.apache.pinot.plugin.inputformat.parquet.ParquetRecordReader'
  configClassName: 'org.apache.pinot.plugin.inputformat.parquet.ParquetRecordReaderConfig'
tableSpec:
  tableName: 'devices'
pinotClusterSpecs:
  - controllerURI: 'http://localhost:9000'
Then I run:
bin/pinot-admin.sh LaunchDataIngestionJob -jobSpecFile job.yml
JVM options:
-XX:ActiveProcessorCount=15 -Xms1G -Xmx64G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-controller.log
Raounaq Sharma
12/01/2023, 6:25 AM
When we filter with where id='something123', we don't get all the data associated with it: segments are being pruned (so partitioning is working), but it's not querying all the segments that contain the data.
Specifications:
1. The hash function is Murmur
2. The number of Kafka partitions and table partitions matches.
3. I think Kafka is also using Murmur, but I am not sure. Also, does it matter whether Kafka's partitioner and Pinot's table partitioning hash function are the same?
A possible solution could be to try out all the hash functions, but I want to understand the cause more precisely. Any help?
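One way to narrow this down is to check whether the segments actually carry the expected partition metadata, via the controller's segment metadata endpoint (a sketch; host, table, and segment names are placeholders):
# look in the returned metadata for the partition function, number of partitions, and partition values recorded for the segment
curl "http://localhost:9000/segments/myTable/myTable__0__0__20231201T0000Z/metadata"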
Gurpreet Singh
12/01/2023, 6:33 AM
pinot_server_queries_Count metrics?
Robert Głowacki
12/01/2023, 8:40 AM
bin/pinot-admin.sh StartMinion -zkAddress localhost:2182
The process is running, but when I then try to run a query to ingest data I get an error - please see the attachment. What did I do wrong?
Additionally, the Minion is visible under the Zookeeper browser -> Pinot Cluster -> Configs -> Participants, and on the main dashboard the minion task manager count is 1.
coco
12/01/2023, 10:08 AM
Yusuf Külah
12/01/2023, 9:05 PM
ERROR [ServerSegmentCompletionProtocolHandler] [recommended__11__0__20231201T2000Z] Could not send request http://pinot-controller-0:9000/segmentConsumed?reason=rowLimit&streamPartitionMsgOffset=6553207479&instance=Server_pinot-server-2.pinot-server-headless_8098&offset=-1&name=recommended__11__0__20231201T2000Z&rowCount=5000000&memoryUsedBytes=316670821
org.apache.pinot.shaded.org.apache.http.conn.HttpHostConnectException: Connect to pinot-controller-0:9000 [pinot-controller-0/10.2.71.220] failed: Connection timed out (Connection timed out)
As far as I can tell, the servers are not able to commit the realtime table segment that is full. That's why the servers are not able to consume any more events from the topic.
Is it possible to increase the timeout duration for ServerSegmentCompletion?
Anuj Agrawal
12/02/2023, 4:00 PM
I added controller.disable.ingestion.groovy=false to my config/pinot-controller.conf, but the issue is still not resolved.
Please note that I am trying to use Groovy scripts via the QuickStart command, not by setting up a manual cluster (i.e., starting ZK, Broker, and Controller individually).
Can someone guide me on what could be the issue here?
Anuj Agrawal
12/02/2023, 10:12 PM
def xmlSlurper = new groovy.xml.XmlSlurper();
but this seems to be failing with an error - Invalid transform function.
When I was debugging the code, it seems FunctionEvaluatorFactory.getExpressionEvaluator() is called and fails on my local with: unable to resolve class groovy.xml.XmlSlurper.
Is it possible to import a library in the Groovy script?
Anuj Agrawal
12/03/2023, 12:51 AM
Groovy({def xmlSlurper = new groovy.xml.XmlSlurper(); def object = xmlSlurper.parseText(payload); return object.dbEvent.@eventId.toString()}, payload)
This was supposed to work, as I tested it on a simple Groovy compiler.
But when I try to have Pinot read this config and create the table, I am getting the error below: [screenshot]
Can someone please advise on what I am missing?
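For reference, a sketch of where such a script would sit in the table config, following the transformConfigs shape that appears later in this log (the output column eventId and the source field payload are assumptions taken from the snippet; note the reflective-access warning earlier in this log references org.codehaus.groovy, i.e. the Groovy 2.x line, where XmlSlurper lives in groovy.util rather than groovy.xml):
"ingestionConfig": {
  "transformConfigs": [
    {
      "columnName": "eventId",
      "transformFunction": "Groovy({def object = new groovy.util.XmlSlurper().parseText(payload); return object.dbEvent.@eventId.toString()}, payload)"
    }
  ]
}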
Andres
12/05/2023, 10:57 PM
I created the segments with jobType: SegmentCreation and stored them in S3. Now I want to upload the segments to Pinot - should I use SegmentTarPush or SegmentMetadataPush?
Malte Granderath
12/06/2023, 11:42 AM
The SegmentRelocator task is running and moving COMPLETED segments. Is there any configuration that we are missing, or is this possibly a bug? Table configuration in 🧵
Robert Głowacki
12/06/2023, 12:45 PM
Diego Pontoriero
12/06/2023, 6:27 PM
I'm wondering whether Pinot reads segmentPartitionConfig and does this automatically, whether you have to do something manually when creating the input files for the batch, or none of the above.
Also related, I guess: what happens if you add partitions to a Kafka topic? Presumably all sorts of complexities. Anyway, that's more for my curiosity. I'm hoping we'll stick to regular append-only tables for now, but I want to make sure I'm covered if we need any upsert tables.
Andres
12/06/2023, 9:11 PM
I created the segments with a SegmentCreation job and stored them in S3. I'm disabling the forward index on my largest MV column to save disk space, as I only want to use an inverted index on it. After that, I'm uploading the segments to the server with SegmentMetadataPush. I'm observing that the uncompressed segments on the server are much bigger than the uncompressed data in S3. Comparing the metadata, this is what I get. Segment metadata in S3:
all_segments.dictionary.startOffset = 0
all_segments.dictionary.size = 56400
all_segments.inverted_index.startOffset = 56400
all_segments.inverted_index.size = 40826036
country.dictionary.startOffset = 40882436
country.dictionary.size = 10
country.forward_index.startOffset = 40882446
country.forward_index.size = 16
device_id.dictionary.startOffset = 40882462
device_id.dictionary.size = 476342
device_id.forward_index.startOffset = 41358804
device_id.forward_index.size = 49688
device_type.dictionary.startOffset = 41408492
device_type.dictionary.size = 15
device_type.forward_index.startOffset = 41408507
device_type.forward_index.size = 16
Segment metadata in server:
all_segments.dictionary.startOffset = 0
all_segments.dictionary.size = 56400
all_segments.inverted_index.startOffset = 56400
all_segments.inverted_index.size = 40826036
country.dictionary.startOffset = 40882436
country.dictionary.size = 10
country.forward_index.startOffset = 40882446
country.forward_index.size = 16
device_id.dictionary.startOffset = 40882462
device_id.dictionary.size = 476342
device_id.forward_index.startOffset = 41358804
device_id.forward_index.size = 49688
device_type.dictionary.startOffset = 41408492
device_type.dictionary.size = 15
device_type.forward_index.startOffset = 41408507
device_type.forward_index.size = 16
all_segments.forward_index.startOffset = 41408523
all_segments.forward_index.size = 149779126
The metadata on the server is identical to the metadata in S3, except that a forward index is being created for the all_segments column (last 2 lines). Why is the server creating the forward index? Table definition in thread. This is with the latest Pinot version.
Priyank Bagrecha
12/06/2023, 9:40 PMInstance pinot-controller-0.pinot-controller-headless.pinot.svc.cluster.local_9000 is not leader of cluster pinot-dev due to current session 20392a6522d0024 does not match leader session 304c4bf36fb0004
Andres
12/07/2023, 6:22 PM
"ingestionConfig": {
  "transformConfigs": [
    {
      "columnName": "all_segments_sorted",
      "transformFunction": "arraysortint(all_segments)"
    }
  ]
}
But when creating the segments for an offline table I'm getting this error:
Caused by: java.lang.ClassCastException: class java.util.HashMap cannot be cast to class java.lang.Number (java.util.HashMap and java.lang.Number are in module java.base of loader 'bootstrap')
at org.apache.pinot.common.utils.PinotDataType$14.toInt(PinotDataType.java:770) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.common.utils.PinotDataType.anyToInt(PinotDataType.java:1399) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.common.utils.PinotDataType.toPrimitiveIntArray(PinotDataType.java:1009) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.common.utils.PinotDataType$16.convert(PinotDataType.java:846) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.common.utils.PinotDataType$16.convert(PinotDataType.java:843) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.common.function.FunctionInvoker.convertTypes(FunctionInvoker.java:115) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.segment.local.function.InbuiltFunctionEvaluator$FunctionExecutionNode.execute(InbuiltFunctionEvaluator.java:236) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.segment.local.function.InbuiltFunctionEvaluator.evaluate(InbuiltFunctionEvaluator.java:114) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
at org.apache.pinot.segment.local.recordtransformer.ExpressionTransformer.transform(ExpressionTransformer.java:123) ~[pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-b25f7cf6c165d9de39d79e86894235fca359adab]
... 15 more
The input files are Parquet files created by Spark.
Priyank Bagrecha
12/07/2023, 7:53 PM
/INSTANCES/Server_pinot-dev-server-12.pinot-dev-server-headless.pinot.svc.cluster.local_8098/MESSAGES/4fdb8c7d-cc74-4744-8889-4bf8b71b1a15
{
  "id": "4fdb8c7d-cc74-4744-8889-4bf8b71b1a15",
  "simpleFields": {
    "CREATE_TIMESTAMP": "1701467131938",
    "ClusterEventName": "IdealStateChange",
    "EXE_SESSION_ID": "102e9911d3d0022",
    "FROM_STATE": "OFFLINE",
    "MSG_ID": "4fdb8c7d-cc74-4744-8889-4bf8b71b1a15",
    "MSG_STATE": "read",
    "MSG_TYPE": "STATE_TRANSITION",
    "PARTITION_NAME": "table_main_OFFLINE_1",
    "READ_TIMESTAMP": "1701467131949",
    "RESOURCE_NAME": "table_main_OFFLINE",
    "RESOURCE_TAG": "table_main_OFFLINE",
    "RETRY_COUNT": "3",
    "SRC_NAME": "pinot-dev-controller-2.pinot-dev-controller-headless.pinot.svc.cluster.local_9000",
    "SRC_SESSION_ID": "304c4bf36fb0004",
    "STATE_MODEL_DEF": "SegmentOnlineOfflineStateModel",
    "STATE_MODEL_FACTORY_NAME": "DEFAULT",
    "TGT_NAME": "Server_pinot-dev-server-12.pinot-dev-server-headless.pinot.svc.cluster.local_8098",
    "TGT_SESSION_ID": "102e9911d3d0022",
    "TO_STATE": "ONLINE"
  },
  "mapFields": {},
  "listFields": {}
}
Steven Hall
12/07/2023, 10:32 PM
Priyank Bagrecha
12/07/2023, 10:49 PM
2023-12-07 14:41:44.401
Opening socket connection to server zookeeper.pinot.svc.cluster.local/172.20.11.129:2181. Will not attempt to authenticate using SASL (unknown error)
parth
12/08/2023, 4:54 AM
Bharath
12/08/2023, 2:49 PM
The pods pinot-server-7 and pinot-server-8 are always the ones that get killed and restarted. Checking the logs on these servers, this is what I see below. Any help here would be really beneficial.
I'm not a developer (I'm an infrastructure engineer), so it's quite difficult for me to get into application-level debugging.
javax.management.InstanceAlreadyExistsException: kafka.consumer:type=app-info,id=delivery_aggregation_REALTIME-7now-aggregation-0
at com.sun.jmx.mbeanserver.Repository.addMBean(Repository.java:436) ~[?:?]
at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerWithRepository(DefaultMBeanServerInterceptor.java:1855) ~[?:?]
at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerDynamicMBean(DefaultMBeanServerInterceptor.java:955) ~[?:?]
at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerObject(DefaultMBeanServerInterceptor.java:890) ~[?:?]
at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.registerMBean(DefaultMBeanServerInterceptor.java:320) ~[?:?]
at com.sun.jmx.mbeanserver.JmxMBeanServer.registerMBean(JmxMBeanServer.java:522) ~[?:?]
at org.apache.pinot.shaded.org.apache.kafka.common.utils.AppInfoParser.registerAppInfo(AppInfoParser.java:64) [pinot-confluent-avro-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.shaded.org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:814) [pinot-confluent-avro-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.shaded.org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:665) [pinot-confluent-avro-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.shaded.org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:646) [pinot-confluent-avro-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.shaded.org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:626) [pinot-confluent-avro-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.plugin.stream.kafka20.KafkaPartitionLevelConnectionHandler.createConsumer(KafkaPartitionLevelConnectionHandler.java:84) [pinot-kafka-2.0-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.plugin.stream.kafka20.KafkaPartitionLevelConnectionHandler.<init>(KafkaPartitionLevelConnectionHandler.java:70) [pinot-kafka-2.0-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.plugin.stream.kafka20.KafkaStreamMetadataProvider.<init>(KafkaStreamMetadataProvider.java:59) [pinot-kafka-2.0-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.plugin.stream.kafka20.KafkaConsumerFactory.createPartitionMetadataProvider(KafkaConsumerFactory.java:35) [pinot-kafka-2.0-1.1.0-SNAPSHOT-shaded.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.core.data.manager.realtime.RealtimeSegmentDataManager.createPartitionMetadataProvider(RealtimeSegmentDataManager.java:1673) [pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.core.data.manager.realtime.RealtimeSegmentDataManager.<init>(RealtimeSegmentDataManager.java:1494) [pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.core.data.manager.realtime.RealtimeTableDataManager.addSegment(RealtimeTableDataManager.java:442) [pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.server.starter.helix.HelixInstanceDataManager.addRealtimeSegment(HelixInstanceDataManager.java:229) [pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at org.apache.pinot.server.starter.helix.SegmentOnlineOfflineStateModelFactory$SegmentOnlineOfflineStateModel.onBecomeConsumingFromOffline(SegmentOnlineOfflineStateModelFactory.java:80) [pinot-all-1.1.0-SNAPSHOT-jar-with-dependencies.jar:1.1.0-SNAPSHOT-d7dc4c5eff6af90bb1e1385276b70e4685137cc0]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
piby
12/08/2023, 4:09 PM{
"tableName": "xxx",
"tableType": "OFFLINE",
"segmentsConfig": {
"retentionTimeUnit": "DAYS",
"retentionTimeValue": "3650",
"schemaName": "xxx",
"replication": "3",
"timeColumnName": "time"
},
"fieldConfigList": [
{
"name": "location",
"indexes": {
"inverted": {}
}
}
],
"ingestionConfig": {},
"tenants": {},
"tableIndexConfig": {
"loadMode": "MMAP",
"nullHandlingEnabled": "true",
"createInvertedIndexDuringSegmentGeneration": true,
"sortedColumn": [
"location"
]
},
"metadata": {
"customConfigs": {}
}
}