# Troubleshooting
We wanted to create some Grafana dashboards. Do you have a generic one that we can use to monitor Pinot (e.g. server disk space for segments, JMX counters for QPS, etc.)?
You can enable it by setting

```yaml
jvmOpts: "-javaagent:/opt/pinot/etc/jmx_prometheus_javaagent/jmx_prometheus_javaagent-0.12.0.jar=8080:/opt/pinot/etc/jmx_prometheus_javaagent/configs/pinot.yml -Xms256M -Xmx4G"
```

in your Helm chart's `values.yml`. This opens port 8080 for Prometheus to scrape metrics.
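The agent serves metrics over plain HTTP, so anything that can reach the pods on that port can scrape them. A minimal sketch of a static Prometheus scrape job, assuming in-cluster service names like `pinot-controller` (substitute the names from your release):

```yaml
scrape_configs:
  - job_name: pinot
    static_configs:
      # Assumed service names; adjust to your Helm release
      - targets:
          - pinot-controller:8080
          - pinot-broker:8080
          - pinot-server:8080
```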
Ok great!
@Xiang Fu, don't we need to set `-Dcom.sun.management.jmxremote.port` and `-Dcom.sun.management.jmxremote.rmi.port`?
Yes. Let me paste a full YAML.
You need to add them to the annotations.
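A note on those flags: the exporter runs as a javaagent inside the Pinot JVM and reads MBeans in-process, so the `-Dcom.sun.management.jmxremote.*` flags are only needed if you also want to attach external tools such as JConsole. A sketch of adding them to a component's `jvmOpts` (port 9010 is a hypothetical choice, and disabling auth/SSL is only reasonable inside a trusted cluster):

```yaml
# Hypothetical excerpt of values.yml; only the jvmOpts line changes
controller:
  jvmOpts: >-
    -javaagent:/opt/pinot/etc/jmx_prometheus_javaagent/jmx_prometheus_javaagent-0.12.0.jar=8080:/opt/pinot/etc/jmx_prometheus_javaagent/configs/pinot.yml
    -Dcom.sun.management.jmxremote.port=9010
    -Dcom.sun.management.jmxremote.rmi.port=9010
    -Dcom.sun.management.jmxremote.authenticate=false
    -Dcom.sun.management.jmxremote.ssl=false
    -Xms256M -Xmx4G
```

The full sample values file: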
```yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Default values for Pinot.

image:
  repository: apachepinot/pinot
  tag: latest
  pullPolicy: IfNotPresent

cluster:
  name: pinot-quickstart

controller:
  name: controller
  port: 9000
  replicaCount: 1

  persistence:
    enabled: true
    accessMode: ReadWriteOnce
    size: 30G
    mountPath: /var/pinot/controller/data
    storageClass: ""

  data:
    dir: /var/pinot/controller/data


  vip:
    host: pinot-controller
    port: 9000

  jvmOpts: "-javaagent:/opt/pinot/etc/jmx_prometheus_javaagent/jmx_prometheus_javaagent-0.12.0.jar=8080:/opt/pinot/etc/jmx_prometheus_javaagent/configs/pinot.yml -Xms256M -Xmx4G"

  log4j2ConfFile: /opt/pinot/conf/pinot-controller-log4j2.xml
  pluginsDir: /opt/pinot/plugins

  service:
    annotations:
      "<http://prometheus.io/scrape|prometheus.io/scrape>": "true"
      "<http://prometheus.io/port|prometheus.io/port>": "8080"
    clusterIP: ""
    externalIPs: []
    loadBalancerIP: ""
    loadBalancerSourceRanges: []
    type: NodePort
    port: 9000
    nodePort: ""

  external:
    enabled: false
    type: LoadBalancer
    port: 9000

  resources: {}

  nodeSelector: {}

  tolerations: []

  affinity: {}

  podAnnotations:
    "<http://prometheus.io/scrape|prometheus.io/scrape>": "true"
    "<http://prometheus.io/port|prometheus.io/port>": "8080"

  updateStrategy:
    type: RollingUpdate

broker:
  name: broker

  port: 8099

  replicaCount: 1

  jvmOpts: "-javaagent:/opt/pinot/etc/jmx_prometheus_javaagent/jmx_prometheus_javaagent-0.12.0.jar=8080:/opt/pinot/etc/jmx_prometheus_javaagent/configs/pinot.yml -Xms256M -Xmx4G"

  log4j2ConfFile: /opt/pinot/conf/pinot-broker-log4j2.xml
  pluginsDir: /opt/pinot/plugins

  routingTable:
    builderClass: random

  service:
    annotations:
      "<http://prometheus.io/scrape|prometheus.io/scrape>": "true"
      "<http://prometheus.io/port|prometheus.io/port>": "8080"
    clusterIP: ""
    externalIPs: []
    loadBalancerIP: ""
    loadBalancerSourceRanges: []
    type: NodePort
    port: 8099
    nodePort: ""

  external:
    enabled: false
    type: LoadBalancer
    port: 8099

  resources: {}

  nodeSelector: {}

  affinity: {}

  tolerations: []

  podAnnotations:
    "<http://prometheus.io/scrape|prometheus.io/scrape>": "true"
    "<http://prometheus.io/port|prometheus.io/port>": "8080"

  updateStrategy:
    type: RollingUpdate

server:
  name: server

  ports:
    netty: 8098
    admin: 8097

  replicaCount: 2

  dataDir: /var/pinot/server/data/index
  segmentTarDir: /var/pinot/server/data/segment

  persistence:
    enabled: true
    accessMode: ReadWriteOnce
    size: 30G
    mountPath: /var/pinot/server/data
    storageClass: ""
    #storageClass: "ssd"

  jvmOpts: "-javaagent:/opt/pinot/etc/jmx_prometheus_javaagent/jmx_prometheus_javaagent-0.12.0.jar=8080:/opt/pinot/etc/jmx_prometheus_javaagent/configs/pinot.yml -Xms512M -Xmx4G"

  log4j2ConfFile: /opt/pinot/conf/pinot-server-log4j2.xml
  pluginsDir: /opt/pinot/plugins

  service:
    annotations:
      "<http://prometheus.io/scrape|prometheus.io/scrape>": "true"
      "<http://prometheus.io/port|prometheus.io/port>": "8080"
    clusterIP: ""
    externalIPs: []
    loadBalancerIP: ""
    loadBalancerSourceRanges: []
    type: NodePort
    port: 8098
    nodePort: ""

  resources: {}

  nodeSelector: {}

  affinity: {}

  tolerations: []

  podAnnotations:
    "<http://prometheus.io/scrape|prometheus.io/scrape>": "true"
    "<http://prometheus.io/port|prometheus.io/port>": "8080"

  updateStrategy:
    type: RollingUpdate

# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------

zookeeper:
  ## If true, install the Zookeeper chart alongside Pinot
  ## ref: https://github.com/kubernetes/charts/tree/master/incubator/zookeeper
  enabled: true

  ## Configure Zookeeper resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  resources: {}

  ## Replicas
  replicaCount: 1

  ## Environmental variables to set in Zookeeper
  env:
    ## The JVM heap size to allocate to Zookeeper
    ZK_HEAP_SIZE: "256M"

  persistence:
    enabled: true
    ## The amount of PV storage allocated to each Zookeeper pod in the statefulset
    # size: "2Gi"

  ## Specify a Zookeeper imagePullPolicy
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  image:
    pullPolicy: "IfNotPresent"

  ## If the Zookeeper Chart is disabled a URL and port are required to connect
  url: ""
  port: 2181

  ## Pod scheduling preferences (by default keep pods within a release on separate nodes).
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## By default we don't set affinity:
  affinity: {}  # Criteria by which pod label-values influence scheduling for zookeeper pods.
  # podAntiAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     - topologyKey: "kubernetes.io/hostname"
  #       labelSelector:
  #         matchLabels:
  #           release: zookeeper
```
This is a sample you can refer to.
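With the `prometheus.io/scrape` and `prometheus.io/port` pod annotations in the file above, an in-cluster Prometheus can also discover the pods automatically instead of using static targets. A sketch of the corresponding scrape job, using the common annotation-relabeling pattern (not Pinot-specific):

```yaml
scrape_configs:
  - job_name: pinot-pods
    kubernetes_sd_configs:
      - role: pod  # discover all pods in the cluster
    relabel_configs:
      # Keep only pods annotated with prometheus.io/scrape: "true"
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: "true"
      # Point the scrape address at the port named in prometheus.io/port
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: '([^:]+)(?::\d+)?;(\d+)'
        replacement: '$1:$2'
        target_label: __address__
```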
That's great! Thanks so much
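To close the loop on the original dashboard question: once Prometheus is scraping these endpoints, Grafana only needs a Prometheus datasource, and the exported Pinot JMX metrics (QPS counters, segment disk usage, and so on) can be graphed from it. A sketch of provisioning the datasource, placed under `/etc/grafana/provisioning/datasources/`; the URL assumes a typical in-cluster Prometheus service and will differ in your setup:

```yaml
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy  # the Grafana backend proxies the queries
    url: http://prometheus-server.monitoring.svc.cluster.local  # assumed service name
    isDefault: true
```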