
Elasticsearch throws NoNodeAvailableException when deleting a large volume of data with client.prepareDelete

Anonymous | Posted 2019-10-25 | Views: 1182

The problem does not occur when the amount of data being deleted is small. Elasticsearch 6.5, JDK 1.8.
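Judging from the stack trace (tempo.util.ESClient.deleteDocumentById invoked from tempo.spark2.LoadES$1.call inside Dataset.foreachPartition), the failing path presumably looks something like the sketch below: one synchronous prepareDelete().get() per document, issued from every Spark task through a TransportClient. Only the class and method names come from the trace; the constructor, fields, and parameters are hypothetical placeholders, not the actual application code.

import java.net.InetAddress;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

// Hypothetical reconstruction of tempo.util.ESClient, inferred from the
// stack trace alone; host, cluster name, and parameters are placeholders.
public class ESClient {
    private final TransportClient client;

    public ESClient(String host, String clusterName) throws Exception {
        Settings settings = Settings.builder()
                .put("cluster.name", clusterName)
                .build();
        client = new PreBuiltTransportClient(settings)
                .addTransportAddress(new TransportAddress(InetAddress.getByName(host), 9300));
    }

    // Corresponds to ESClient.java:79 in the trace:
    // one blocking round trip to the cluster per document.
    public void deleteDocumentById(String index, String type, String id) {
        client.prepareDelete(index, type, id).get();
    }
}

With roughly 18,030 deletes fanned out across hundreds of tasks, all hitting a single transport address (10.100.44.80:9300) with blocking per-document requests, the node can be overwhelmed and the TransportClient then marks it unavailable, which surfaces as NoNodeAvailableException. The driver log from the failed run: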
2019-10-25 15:35:34,178 [main] INFO  tempo.spark2.DriverMain - Delete Count: 18030
2019-10-25 15:35:45,044 [task-result-getter-0] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 200.0 in stage 100.0 (TID 1227, hadoop-20.chinacscs.com, executor 2): NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{cG3xgIjuS32Syq9y3bSs0A}{10.100.44.80}{10.100.44.80:9300}]]
at org.elasticsearch.client.transport.TransportClientNodesService.ensureNodesAreAvailable(TransportClientNodesService.java:349)
at org.elasticsearch.client.transport.TransportClientNodesService.execute(TransportClientNodesService.java:247)
at org.elasticsearch.client.transport.TransportProxyClient.execute(TransportProxyClient.java:60)
at org.elasticsearch.client.transport.TransportClient.doExecute(TransportClient.java:382)
at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:395)
at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:384)
at org.elasticsearch.action.ActionRequestBuilder.execute(ActionRequestBuilder.java:46)
at org.elasticsearch.action.ActionRequestBuilder.get(ActionRequestBuilder.java:53)
at tempo.util.ESClient.deleteDocumentById(ESClient.java:79)
at tempo.spark2.LoadES$1.call(LoadES.java:83)
at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$2.apply(Dataset.scala:2686)
at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$2.apply(Dataset.scala:2686)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2067)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2067)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:109)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)

2019-10-25 15:35:45,317 [task-result-getter-2] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 333.0 in stage 100.0 (TID 1277, hadoop-20.chinacscs.com, executor 2): java.lang.NullPointerException
at tempo.spark2.LoadES$1.call(LoadES.java:83)
at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$2.apply(Dataset.scala:2686)
at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$2.apply(Dataset.scala:2686)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:929)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2067)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2067)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:109)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)

2019-10-25 15:35:45,339 [task-result-getter-1] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 201.1 in stage 100.0 (TID 1274, hadoop-20.chinacscs.com, executor 2): NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{4OhE0tekS02HM0W-Myba6Q}{10.100.44.80}{10.100.44.80:9300}]]
... (same stack trace as the first NoNodeAvailableException above)

2019-10-25 15:35:45,548 [task-result-getter-1] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 200.2 in stage 100.0 (TID 1294, hadoop-20.chinacscs.com, executor 2): NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{gvPxB4dgRlOmssem9WQDpg}{10.100.44.80}{10.100.44.80:9300}]]
... (same stack trace as the first NoNodeAvailableException above)

2019-10-25 15:35:45,676 [task-result-getter-0] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 200.3 in stage 100.0 (TID 1304, hadoop-20.chinacscs.com, executor 2): NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{e20CImXJROOQ6OTjSu4PJg}{10.100.44.80}{10.100.44.80:9300}]]
... (same stack trace as the first NoNodeAvailableException above)

2019-10-25 15:35:45,678 [task-result-getter-0] ERROR org.apache.spark.scheduler.TaskSetManager - Task 200 in stage 100.0 failed 4 times; aborting job
org.apache.spark.SparkException: Job aborted due to stage failure: Task 200 in stage 100.0 failed 4 times, most recent failure: Lost task 200.3 in stage 100.0 (TID 1304, hadoop-20.chinacscs.com, executor 2): NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{e20CImXJROOQ6OTjSu4PJg}{10.100.44.80}{10.100.44.80:9300}]]
... (same stack trace as the first NoNodeAvailableException above)

Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1599)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1587)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1586)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1586)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:831)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1820)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1769)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1758)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:642)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2027)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2048)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2067)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2092)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:929)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:927)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
at org.apache.spark.rdd.RDD.foreachPartition(RDD.scala:927)
at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply$mcV$sp(Dataset.scala:2675)
at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2675)
at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2675)
at org.apache.spark.sql.Dataset$$anonfun$withNewRDDExecutionId$1.apply(Dataset.scala:3238)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
at org.apache.spark.sql.Dataset.withNewRDDExecutionId(Dataset.scala:3234)
at org.apache.spark.sql.Dataset.foreachPartition(Dataset.scala:2674)
at org.apache.spark.sql.Dataset.foreachPartition(Dataset.scala:2686)
at tempo.spark2.LoadES.delsert(LoadES.java:76)
at tempo.spark2.DriverMain.main(DriverMain.java:148)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:879)
at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:197)
at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:227)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:136)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
2019-10-25 15:35:45,715 [task-result-getter-1] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 343.2 in stage 100.0 (TID 1309, hadoop-20.chinacscs.com, executor 2): TaskKilled (Stage cancelled)
2019-10-25 15:35:45,736 [task-result-getter-0] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 333.3 in stage 100.0 (TID 1310, hadoop-20.chinacscs.com, executor 2): TaskKilled (Stage cancelled)
2019-10-25 15:35:45,755 [task-result-getter-3] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 336.3 in stage 100.0 (TID 1312, hadoop-20.chinacscs.com, executor 2): TaskKilled (Stage cancelled)
2019-10-25 15:35:45,784 [task-result-getter-2] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 303.0 in stage 100.0 (TID 1311, hadoop-18.chinacscs.com, executor 4): TaskKilled (Stage cancelled)
ERROR StatusLogger Log4j2 could not find a logging implementation. Please add log4j-core to the classpath. Using SimpleLogger to log to the console...
Caused by: NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{e20CImXJROOQ6OTjSu4PJg}{10.100.44.80}{10.100.44.80:9300}]]
... (same stack trace as the first NoNodeAvailableException above)
2019-10-25 15:35:45,812 [main] ERROR tempo.spark2.DriverMain - org.apache.spark.SparkException: Job aborted due to stage failure: Task 200 in stage 100.0 failed 4 times, most recent failure: Lost task 200.3 in stage 100.0 (TID 1304, hadoop-20.chinacscs.com, executor 2): NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{e20CImXJROOQ6OTjSu4PJg}{10.100.44.80}{10.100.44.80:9300}]]
... (same stack trace as the first NoNodeAvailableException above)

Driver stacktrace:
... (identical to the first Driver stacktrace above)
Caused by: NoNodeAvailableException[None of the configured nodes are available: [{#transport#-1}{e20CImXJROOQ6OTjSu4PJg}{10.100.44.80}{10.100.44.80:9300}]]
... (same stack trace as above)

DriverMainError Exception is a general placeholder, scroll up STDOUT to find actual Error!
2019-10-25 15:35:45,820 [task-result-getter-1] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 272.0 in stage 100.0 (TID 1282, hadoop-18.chinacscs.com, executor 4): TaskKilled (Stage cancelled)
2019-10-25 15:35:45,821 [task-result-getter-0] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 289.0 in stage 100.0 (TID 1284, hadoop-18.chinacscs.com, executor 4): TaskKilled (Stage cancelled)
2019-10-25 15:35:45,821 [task-result-getter-3] WARN org.apache.spark.scheduler.TaskSetManager - Lost task 290.0 in stage 100.0 (TID 1293, hadoop-18.chinacscs.com, executor 4): TaskKilled (Stage cancelled)
Exception in thread "main" java.lang.Exception: DriverMainErorr
at tempo.spark2.DriverMain.main(DriverMain.java:183)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:879)
at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:197)
at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:227)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:136)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
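
Two hedged observations on this log. First, every failed task dies inside a single-document prepareDelete().get(), so batching the deletes through the bulk API cuts each partition's round trips from thousands to a handful, which is the usual way to keep the node responsive under a large delete. Second, the java.lang.NullPointerException at the same line (LoadES.java:83) may indicate that the client reference ends up null on an executor after a failed connection attempt, so a null check or a per-partition client lifecycle is worth reviewing as well. Below is a minimal bulk-delete sketch, an assumed mitigation rather than the poster's code, with hypothetical index/type/id parameters and batch size:

import java.util.Iterator;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.transport.TransportClient;

// A sketch, assuming an ES 6.5 TransportClient: group per-document deletes
// into bulk requests so each partition issues a few large requests instead
// of one synchronous request per document.
public final class BulkDeleter {
    private static final int BATCH_SIZE = 1000; // hypothetical tuning value

    public static void deleteAll(TransportClient client, String index,
                                 String type, Iterator<String> ids) {
        BulkRequestBuilder bulk = client.prepareBulk();
        while (ids.hasNext()) {
            bulk.add(client.prepareDelete(index, type, ids.next()));
            if (bulk.numberOfActions() >= BATCH_SIZE) {
                execute(bulk);
                bulk = client.prepareBulk();
            }
        }
        if (bulk.numberOfActions() > 0) {
            execute(bulk); // flush the final partial batch
        }
    }

    private static void execute(BulkRequestBuilder bulk) {
        BulkResponse response = bulk.get();
        if (response.hasFailures()) {
            // Surface partial failures instead of silently dropping them.
            throw new IllegalStateException(response.buildFailureMessage());
        }
    }
}

A BulkProcessor with a bounded number of concurrent requests would serve the same purpose; the point is to stop issuing one synchronous request per document from every Spark task at once.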