acoustic-printer-83045
04/26/2021, 4:17 AMgray-shoe-75895
04/26/2021, 7:51 AMkafka-topics-ui
and schema-registry-ui
containers were running in my case, although I agree that it likely shouldn't have made a difference.gray-shoe-75895
04/26/2021, 7:52 AM❯ mycli <mysql://datahub:datahub@127.0.0.1/datahub>
MySQL datahub@127.0.0.1:datahub> SELECT * from metadata_aspect;
acoustic-printer-83045
04/26/2021, 2:53 PMbig-carpet-38439
04/27/2021, 2:32 AMbig-carpet-38439
04/27/2021, 2:32 AMacoustic-printer-83045
04/27/2021, 5:26 AMbig-carpet-38439
04/27/2021, 3:40 PMacoustic-printer-83045
04/28/2021, 3:51 AMacoustic-printer-83045
04/28/2021, 3:52 AMacoustic-printer-83045
04/28/2021, 3:53 AMacoustic-printer-83045
04/28/2021, 5:19 AMelasticsearch | {"type": "server", "timestamp": "2021-04-28T05:19:04,014Z", "level": "ERROR", "component": "o.e.x.i.IndexLifecycleRunner", "cluster.name": "docker-cluster", "node.name": "elasticsearch", "message": "policy [kibana-event-log-policy] for index [.kibana-event-log-7.9.3-000001] failed on step [{\"phase\":\"hot\",\"action\":\"rollover\",\"name\":\"check-rollover-ready\"}]. Moving to ERROR step", "cluster.uuid": "hhOxH3oLQEqw9QprYAeHfQ", "node.id": "WI4z8In_QjiGrEQEuMgang" ,
elasticsearch | "stacktrace": ["org.elasticsearch.cluster.block.ClusterBlockException: index [.kibana-event-log-7.9.3-000001] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, index has read-only-allow-delete block];",
elasticsearch | "at org.elasticsearch.cluster.block.ClusterBlocks.indicesBlockedException(ClusterBlocks.java:222) ~[elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.checkBlock(TransportRolloverAction.java:93) ~[elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.checkBlock(TransportRolloverAction.java:60) ~[elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.doStart(TransportMasterNodeAction.java:142) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.start(TransportMasterNodeAction.java:133) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.master.TransportMasterNodeAction.doExecute(TransportMasterNodeAction.java:110) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.master.TransportMasterNodeAction.doExecute(TransportMasterNodeAction.java:59) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:179) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.ActionFilter$Simple.apply(ActionFilter.java:53) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:177) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:155) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:83) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:409) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin(ClientHelper.java:109) [x-pack-core-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.core.ClientHelper.executeWithHeadersAsync(ClientHelper.java:170) [x-pack-core-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.ilm.LifecyclePolicySecurityClient.doExecute(LifecyclePolicySecurityClient.java:51) [x-pack-ilm-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:409) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.client.support.AbstractClient$IndicesAdmin.execute(AbstractClient.java:1274) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.client.support.AbstractClient$IndicesAdmin.rolloverIndex(AbstractClient.java:1786) [elasticsearch-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep.evaluateCondition(WaitForRolloverReadyStep.java:141) [x-pack-core-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.ilm.IndexLifecycleRunner.runPeriodicStep(IndexLifecycleRunner.java:174) [x-pack-ilm-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.ilm.IndexLifecycleService.triggerPolicies(IndexLifecycleService.java:329) [x-pack-ilm-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.ilm.IndexLifecycleService.triggered(IndexLifecycleService.java:267) [x-pack-ilm-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.core.scheduler.SchedulerEngine.notifyListeners(SchedulerEngine.java:183) [x-pack-core-7.9.3.jar:7.9.3]",
elasticsearch | "at org.elasticsearch.xpack.core.scheduler.SchedulerEngine$ActiveSchedule.run(SchedulerEngine.java:211) [x-pack-core-7.9.3.jar:7.9.3]",
elasticsearch | "at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) [?:?]",
elasticsearch | "at java.util.concurrent.FutureTask.run(FutureTask.java:264) [?:?]",
elasticsearch | "at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) [?:?]",
elasticsearch | "at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130) [?:?]",
elasticsearch | "at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630) [?:?]",
elasticsearch | "at java.lang.Thread.run(Thread.java:832) [?:?]"] }
elasticsearch | {"type": "server", "timestamp": "2021-04-28T05:19:04,018Z", "level": "ERROR", "component": "o.e.x.i.IndexLifecycleRunner", "cluster.name": "docker-cluster", "node.name": "elasticsearch", "message": "policy [ilm-history-ilm-policy] for index [ilm-history-2-000001] failed on step [{\"phase\":\"hot\",\"action\":\"rollover\",\"name\":\"check-rollover-ready\"}]. Moving to ERROR step", "cluster.uuid": "hhOxH3oLQEqw9QprYAeHfQ", "node.id": "WI4z8In_QjiGrEQEuMgang" ,
elasticsearch | "stacktrace": ["org.elasticsearch.cluster.block.ClusterBlockException: index [ilm-history-2-000001] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, index has read-only-allow-delete block];",
acoustic-printer-83045
04/28/2021, 5:19 AMacoustic-printer-83045
04/28/2021, 5:20 AMkibana | {"type":"log","@timestamp":"2021-04-28T05:19:35Z","tags":["error","plugins","taskManager","taskManager"],"pid":7,"message":"Failed to poll for work: Error: Request Timeout after 30000ms"}
mysql | 2021-04-28T05:19:49.697620Z 111 [Note] Bad handshake
gray-shoe-75895
04/28/2021, 5:28 AMearly-lamp-41924
04/28/2021, 5:40 AMearly-lamp-41924
04/28/2021, 5:41 AMearly-lamp-41924
04/28/2021, 5:41 AMearly-lamp-41924
04/28/2021, 5:41 AMdocker system prune
early-lamp-41924
04/28/2021, 5:41 AMacoustic-printer-83045
04/28/2021, 2:36 PMacoustic-printer-83045
04/29/2021, 3:13 AM/var/lib/docker
which on my machine defaulted to 64gb. My other primary machine is an osx laptop where it installs in the home folder. Thanks and apologies.early-lamp-41924
04/29/2021, 3:15 AM