From a5653baf808a1cc6dcd68d8dd076106f28c48bb0 Mon Sep 17 00:00:00 2001
From: Garrett Jones
Date: Wed, 1 Aug 2018 11:25:36 -0700
Subject: [PATCH 1/2] proto/client classes

---
 .../v1beta2/ClusterControllerGrpc.java | 786 +++
 .../dataproc/v1beta2/JobControllerGrpc.java | 789 +++
 .../v1beta2/WorkflowTemplateServiceGrpc.java | 998 ++++
 .../dataproc/v1beta2/AcceleratorConfig.java | 723 +++
 .../v1beta2/AcceleratorConfigOrBuilder.java | 56 +
 .../dataproc/v1beta2/CancelJobRequest.java | 894 ++++
 .../v1beta2/CancelJobRequestOrBuilder.java | 65 +
 .../cloud/dataproc/v1beta2/Cluster.java | 2438 +++++++++
 .../cloud/dataproc/v1beta2/ClusterConfig.java | 2681 ++++++++++
 .../v1beta2/ClusterConfigOrBuilder.java | 288 +
 .../dataproc/v1beta2/ClusterMetrics.java | 1034 ++++
 .../v1beta2/ClusterMetricsOrBuilder.java | 117 +
 .../dataproc/v1beta2/ClusterOperation.java | 807 +++
 .../v1beta2/ClusterOperationMetadata.java | 2250 ++++++++
 .../ClusterOperationMetadataOrBuilder.java | 239 +
 .../v1beta2/ClusterOperationOrBuilder.java | 54 +
 .../v1beta2/ClusterOperationStatus.java | 1226 +++++
 .../ClusterOperationStatusOrBuilder.java | 87 +
 .../dataproc/v1beta2/ClusterOrBuilder.java | 274 +
 .../dataproc/v1beta2/ClusterSelector.java | 931 ++++
 .../v1beta2/ClusterSelectorOrBuilder.java | 92 +
 .../cloud/dataproc/v1beta2/ClusterStatus.java | 1370 +++++
 .../v1beta2/ClusterStatusOrBuilder.java | 88 +
 .../cloud/dataproc/v1beta2/ClustersProto.java | 476 ++
 .../v1beta2/CreateClusterRequest.java | 1179 +++++
 .../CreateClusterRequestOrBuilder.java | 106 +
 .../CreateWorkflowTemplateRequest.java | 820 +++
 ...reateWorkflowTemplateRequestOrBuilder.java | 56 +
 .../v1beta2/DeleteClusterRequest.java | 1267 +++++
 .../DeleteClusterRequestOrBuilder.java | 119 +
 .../dataproc/v1beta2/DeleteJobRequest.java | 894 ++++
 .../v1beta2/DeleteJobRequestOrBuilder.java | 65 +
 .../DeleteWorkflowTemplateRequest.java | 675 +++
 ...eleteWorkflowTemplateRequestOrBuilder.java | 42 +
 .../v1beta2/DiagnoseClusterRequest.java | 894 ++++
 .../DiagnoseClusterRequestOrBuilder.java | 65 +
 .../v1beta2/DiagnoseClusterResults.java | 591 +++
 .../DiagnoseClusterResultsOrBuilder.java | 31 +
 .../cloud/dataproc/v1beta2/DiskConfig.java | 759 +++
 .../dataproc/v1beta2/DiskConfigOrBuilder.java | 54 +
 .../dataproc/v1beta2/GceClusterConfig.java | 2255 ++++++++
 .../v1beta2/GceClusterConfigOrBuilder.java | 324 ++
 .../dataproc/v1beta2/GetClusterRequest.java | 894 ++++
 .../v1beta2/GetClusterRequestOrBuilder.java | 65 +
 .../cloud/dataproc/v1beta2/GetJobRequest.java | 894 ++++
 .../v1beta2/GetJobRequestOrBuilder.java | 65 +
 .../v1beta2/GetWorkflowTemplateRequest.java | 673 +++
 .../GetWorkflowTemplateRequestOrBuilder.java | 42 +
 .../cloud/dataproc/v1beta2/HadoopJob.java | 2469 +++++++++
 .../dataproc/v1beta2/HadoopJobOrBuilder.java | 323 ++
 .../cloud/dataproc/v1beta2/HiveJob.java | 1912 +++++++
 .../dataproc/v1beta2/HiveJobOrBuilder.java | 236 +
 .../dataproc/v1beta2/InstanceGroupConfig.java | 2345 +++++++++
 .../v1beta2/InstanceGroupConfigOrBuilder.java | 258 +
 ...tantiateInlineWorkflowTemplateRequest.java | 1017 ++++
 ...nlineWorkflowTemplateRequestOrBuilder.java | 86 +
 .../InstantiateWorkflowTemplateRequest.java | 878 ++++
 ...tiateWorkflowTemplateRequestOrBuilder.java | 74 +
 .../google/cloud/dataproc/v1beta2/Job.java | 4614 +++++++++++++++++
 .../cloud/dataproc/v1beta2/JobOrBuilder.java | 498 ++
 .../cloud/dataproc/v1beta2/JobPlacement.java | 739 +++
 .../v1beta2/JobPlacementOrBuilder.java | 47 +
 .../cloud/dataproc/v1beta2/JobReference.java | 767 +++
 .../v1beta2/JobReferenceOrBuilder.java | 55 +
 .../cloud/dataproc/v1beta2/JobScheduling.java | 516 ++
 .../v1beta2/JobSchedulingOrBuilder.java | 23 +
 .../cloud/dataproc/v1beta2/JobStatus.java | 1474 ++++++
 .../dataproc/v1beta2/JobStatusOrBuilder.java | 90 +
 .../cloud/dataproc/v1beta2/JobsProto.java | 582 +++
 .../dataproc/v1beta2/LifecycleConfig.java | 1247 +++++
 .../v1beta2/LifecycleConfigOrBuilder.java | 92 +
 .../dataproc/v1beta2/ListClustersRequest.java | 1221 +++++
 .../v1beta2/ListClustersRequestOrBuilder.java | 120 +
 .../v1beta2/ListClustersResponse.java | 1031 ++++
 .../ListClustersResponseOrBuilder.java | 75 +
 .../dataproc/v1beta2/ListJobsRequest.java | 1619 ++++++
 .../v1beta2/ListJobsRequestOrBuilder.java | 153 +
 .../dataproc/v1beta2/ListJobsResponse.java | 1031 ++++
 .../v1beta2/ListJobsResponseOrBuilder.java | 75 +
 .../v1beta2/ListWorkflowTemplatesRequest.java | 827 +++
 ...ListWorkflowTemplatesRequestOrBuilder.java | 60 +
 .../ListWorkflowTemplatesResponse.java | 1031 ++++
 ...istWorkflowTemplatesResponseOrBuilder.java | 75 +
 .../cloud/dataproc/v1beta2/LoggingConfig.java | 1203 +++++
 .../v1beta2/LoggingConfigOrBuilder.java | 121 +
 .../dataproc/v1beta2/ManagedCluster.java | 1222 +++++
 .../v1beta2/ManagedClusterOrBuilder.java | 144 +
 .../dataproc/v1beta2/ManagedGroupConfig.java | 739 +++
 .../v1beta2/ManagedGroupConfigOrBuilder.java | 47 +
 .../v1beta2/NodeInitializationAction.java | 844 +++
 .../NodeInitializationActionOrBuilder.java | 61 +
 .../dataproc/v1beta2/OperationsProto.java | 103 +
 .../cloud/dataproc/v1beta2/OrderedJob.java | 3065 +++++++++++
 .../dataproc/v1beta2/OrderedJobOrBuilder.java | 343 ++
 .../google/cloud/dataproc/v1beta2/PigJob.java | 2128 ++++++++
 .../dataproc/v1beta2/PigJobOrBuilder.java | 257 +
 .../cloud/dataproc/v1beta2/PySparkJob.java | 2360 +++++++++
 .../dataproc/v1beta2/PySparkJobOrBuilder.java | 322 ++
 .../cloud/dataproc/v1beta2/QueryList.java | 802 +++
 .../dataproc/v1beta2/QueryListOrBuilder.java | 92 +
 .../cloud/dataproc/v1beta2/RegionName.java | 189 +
 .../cloud/dataproc/v1beta2/SharedProto.java | 50 +
 .../dataproc/v1beta2/SoftwareConfig.java | 1081 ++++
 .../v1beta2/SoftwareConfigOrBuilder.java | 159 +
 .../cloud/dataproc/v1beta2/SparkJob.java | 2413 +++++++++
 .../dataproc/v1beta2/SparkJobOrBuilder.java | 307 ++
 .../cloud/dataproc/v1beta2/SparkSqlJob.java | 2021 ++++++++
 .../v1beta2/SparkSqlJobOrBuilder.java | 237 +
 .../dataproc/v1beta2/SubmitJobRequest.java | 1179 +++++
 .../v1beta2/SubmitJobRequestOrBuilder.java | 106 +
 .../v1beta2/UpdateClusterRequest.java | 2404 +++++++++
 .../UpdateClusterRequestOrBuilder.java | 327 ++
 .../dataproc/v1beta2/UpdateJobRequest.java | 1412 +++++
 .../v1beta2/UpdateJobRequestOrBuilder.java | 130 +
 .../UpdateWorkflowTemplateRequest.java | 663 +++
 ...pdateWorkflowTemplateRequestOrBuilder.java | 37 +
 .../cloud/dataproc/v1beta2/WorkflowGraph.java | 859 +++
 .../v1beta2/WorkflowGraphOrBuilder.java | 53 +
 .../dataproc/v1beta2/WorkflowMetadata.java | 2084 ++++++++
 .../v1beta2/WorkflowMetadataOrBuilder.java | 201 +
 .../cloud/dataproc/v1beta2/WorkflowNode.java | 1415 +++++
 .../v1beta2/WorkflowNodeOrBuilder.java | 115 +
 .../dataproc/v1beta2/WorkflowTemplate.java | 2401 +++++++++
 .../v1beta2/WorkflowTemplateName.java | 212 +
 .../v1beta2/WorkflowTemplateOrBuilder.java | 284 +
 .../v1beta2/WorkflowTemplatePlacement.java | 1020 ++++
 .../WorkflowTemplatePlacementOrBuilder.java | 67 +
 .../v1beta2/WorkflowTemplatesProto.java | 449 ++
 .../dataproc/v1beta2/YarnApplication.java | 1187 +++++
 .../v1beta2/YarnApplicationOrBuilder.java | 77 +
 .../cloud/dataproc/v1beta2/clusters.proto | 712 +++
 .../google/cloud/dataproc/v1beta2/jobs.proto | 767 +++
 .../cloud/dataproc/v1beta2/operations.proto | 83 +
 .../cloud/dataproc/v1beta2/shared.proto | 25 +
 .../dataproc/v1beta2/workflow_templates.proto | 544 ++
 .../v1beta2/ClusterControllerClient.java | 935 ++++
 .../v1beta2/ClusterControllerSettings.java | 297 ++
 .../dataproc/v1beta2/JobControllerClient.java | 773 +++
 .../v1beta2/JobControllerSettings.java | 230 +
 .../WorkflowTemplateServiceClient.java | 1026 ++++
 .../WorkflowTemplateServiceSettings.java | 270 +
 .../cloud/dataproc/v1beta2/package-info.java | 74 +
 .../v1beta2/stub/ClusterControllerStub.java | 108 +
 .../stub/ClusterControllerStubSettings.java | 651 +++
 .../GrpcClusterControllerCallableFactory.java | 116 +
 .../stub/GrpcClusterControllerStub.java | 332 ++
 .../GrpcJobControllerCallableFactory.java | 116 +
 .../v1beta2/stub/GrpcJobControllerStub.java | 252 +
 ...orkflowTemplateServiceCallableFactory.java | 116 +
 .../stub/GrpcWorkflowTemplateServiceStub.java | 329 ++
 .../v1beta2/stub/JobControllerStub.java | 74 +
 .../stub/JobControllerStubSettings.java | 452 ++
 .../stub/WorkflowTemplateServiceStub.java | 97 +
 .../WorkflowTemplateServiceStubSettings.java | 551 ++
 .../v1beta2/ClusterControllerClientTest.java | 356 ++
 .../v1beta2/ClusterControllerSmokeTest.java | 65 +
 .../v1beta2/JobControllerClientTest.java | 326 ++
 .../v1beta2/MockClusterController.java | 57 +
 .../v1beta2/MockClusterControllerImpl.java | 148 +
 .../dataproc/v1beta2/MockJobController.java | 57 +
 .../v1beta2/MockJobControllerImpl.java | 143 +
 .../v1beta2/MockWorkflowTemplateService.java | 57 +
 .../MockWorkflowTemplateServiceImpl.java | 151 +
 .../WorkflowTemplateServiceClientTest.java | 378 ++
 164 files changed, 106912 insertions(+)
 create mode 100644 google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerGrpc.java
 create mode 100644 google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java
 create mode 100644 google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetrics.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetricsOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperation.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadata.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadataOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatus.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatusOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelector.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelectorOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatus.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatusOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResults.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResultsOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacement.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacementOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReference.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReferenceOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobScheduling.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobSchedulingOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatus.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatusOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobsProto.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponse.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponseOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponse.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponseOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedCluster.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedClusterOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationAction.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationActionOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OperationsProto.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryList.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryListOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/RegionName.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SharedProto.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJob.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJobOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequest.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequestOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraph.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraphOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadata.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadataOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNode.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNodeOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplate.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateName.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacement.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacementOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatesProto.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplication.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplicationOrBuilder.java
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClient.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSettings.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerClient.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerSettings.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClient.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceSettings.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/package-info.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStub.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStubSettings.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerCallableFactory.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerStub.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerCallableFactory.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerStub.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceCallableFactory.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceStub.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStub.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStubSettings.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStub.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStubSettings.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClientTest.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSmokeTest.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/JobControllerClientTest.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterController.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterControllerImpl.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobController.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobControllerImpl.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateService.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateServiceImpl.java
 create mode 100644 google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClientTest.java

diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerGrpc.java b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerGrpc.java
new file mode 100644
index 000000000000..0f31a92db3e5
--- /dev/null
+++ b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerGrpc.java
@@ -0,0 +1,786 @@
+package com.google.cloud.dataproc.v1beta2;
+
+import static io.grpc.MethodDescriptor.generateFullMethodName;
+import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall;
+import static io.grpc.stub.ClientCalls.asyncClientStreamingCall;
+import static io.grpc.stub.ClientCalls.asyncServerStreamingCall;
+import static io.grpc.stub.ClientCalls.asyncUnaryCall;
+import static io.grpc.stub.ClientCalls.blockingServerStreamingCall;
+import static io.grpc.stub.ClientCalls.blockingUnaryCall;
+import static io.grpc.stub.ClientCalls.futureUnaryCall;
+import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall;
+import static io.grpc.stub.ServerCalls.asyncClientStreamingCall;
+import static io.grpc.stub.ServerCalls.asyncServerStreamingCall;
+import static io.grpc.stub.ServerCalls.asyncUnaryCall;
+import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall;
+import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall;
+
+/**
+ *
+ * The ClusterControllerService provides methods to manage clusters
+ * of Compute Engine instances.
+ * 
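+ * <p>Illustrative only: a minimal sketch of wiring a stub to this service. The endpoint
+ * string and the omitted credential setup are assumptions, not part of this generated class:
+ *
+ *   // Channel to the Dataproc endpoint; production code would also attach Google
+ *   // credentials, e.g. via io.grpc.auth.MoreCallCredentials (assumption, not shown).
+ *   io.grpc.ManagedChannel channel =
+ *       io.grpc.ManagedChannelBuilder.forAddress("dataproc.googleapis.com", 443).build();
+ *   ClusterControllerGrpc.ClusterControllerBlockingStub stub =
+ *       ClusterControllerGrpc.newBlockingStub(channel);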
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.10.0)", + comments = "Source: google/cloud/dataproc/v1beta2/clusters.proto") +public final class ClusterControllerGrpc { + + private ClusterControllerGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.dataproc.v1beta2.ClusterController"; + + // Static method descriptors that strictly reflect the proto. + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getCreateClusterMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_CREATE_CLUSTER = getCreateClusterMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getCreateClusterMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getCreateClusterMethod() { + return getCreateClusterMethodHelper(); + } + + private static io.grpc.MethodDescriptor getCreateClusterMethodHelper() { + io.grpc.MethodDescriptor getCreateClusterMethod; + if ((getCreateClusterMethod = ClusterControllerGrpc.getCreateClusterMethod) == null) { + synchronized (ClusterControllerGrpc.class) { + if ((getCreateClusterMethod = ClusterControllerGrpc.getCreateClusterMethod) == null) { + ClusterControllerGrpc.getCreateClusterMethod = getCreateClusterMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.ClusterController", "CreateCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.CreateClusterRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor(new ClusterControllerMethodDescriptorSupplier("CreateCluster")) + .build(); + } + } + } + return getCreateClusterMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getUpdateClusterMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_UPDATE_CLUSTER = getUpdateClusterMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getUpdateClusterMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getUpdateClusterMethod() { + return getUpdateClusterMethodHelper(); + } + + private static io.grpc.MethodDescriptor getUpdateClusterMethodHelper() { + io.grpc.MethodDescriptor getUpdateClusterMethod; + if ((getUpdateClusterMethod = ClusterControllerGrpc.getUpdateClusterMethod) == null) { + synchronized (ClusterControllerGrpc.class) { + if ((getUpdateClusterMethod = ClusterControllerGrpc.getUpdateClusterMethod) == null) { + ClusterControllerGrpc.getUpdateClusterMethod = getUpdateClusterMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.ClusterController", "UpdateCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor(new ClusterControllerMethodDescriptorSupplier("UpdateCluster")) + .build(); + } + } + } + return getUpdateClusterMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getDeleteClusterMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_DELETE_CLUSTER = getDeleteClusterMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getDeleteClusterMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getDeleteClusterMethod() { + return getDeleteClusterMethodHelper(); + } + + private static io.grpc.MethodDescriptor getDeleteClusterMethodHelper() { + io.grpc.MethodDescriptor getDeleteClusterMethod; + if ((getDeleteClusterMethod = ClusterControllerGrpc.getDeleteClusterMethod) == null) { + synchronized (ClusterControllerGrpc.class) { + if ((getDeleteClusterMethod = ClusterControllerGrpc.getDeleteClusterMethod) == null) { + ClusterControllerGrpc.getDeleteClusterMethod = getDeleteClusterMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.ClusterController", "DeleteCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor(new ClusterControllerMethodDescriptorSupplier("DeleteCluster")) + .build(); + } + } + } + return getDeleteClusterMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getGetClusterMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_GET_CLUSTER = getGetClusterMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getGetClusterMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getGetClusterMethod() { + return getGetClusterMethodHelper(); + } + + private static io.grpc.MethodDescriptor getGetClusterMethodHelper() { + io.grpc.MethodDescriptor getGetClusterMethod; + if ((getGetClusterMethod = ClusterControllerGrpc.getGetClusterMethod) == null) { + synchronized (ClusterControllerGrpc.class) { + if ((getGetClusterMethod = ClusterControllerGrpc.getGetClusterMethod) == null) { + ClusterControllerGrpc.getGetClusterMethod = getGetClusterMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.ClusterController", "GetCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.GetClusterRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance())) + .setSchemaDescriptor(new ClusterControllerMethodDescriptorSupplier("GetCluster")) + .build(); + } + } + } + return getGetClusterMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getListClustersMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_LIST_CLUSTERS = getListClustersMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getListClustersMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getListClustersMethod() { + return getListClustersMethodHelper(); + } + + private static io.grpc.MethodDescriptor getListClustersMethodHelper() { + io.grpc.MethodDescriptor getListClustersMethod; + if ((getListClustersMethod = ClusterControllerGrpc.getListClustersMethod) == null) { + synchronized (ClusterControllerGrpc.class) { + if ((getListClustersMethod = ClusterControllerGrpc.getListClustersMethod) == null) { + ClusterControllerGrpc.getListClustersMethod = getListClustersMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.ClusterController", "ListClusters")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.ListClustersRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.ListClustersResponse.getDefaultInstance())) + .setSchemaDescriptor(new ClusterControllerMethodDescriptorSupplier("ListClusters")) + .build(); + } + } + } + return getListClustersMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getDiagnoseClusterMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_DIAGNOSE_CLUSTER = getDiagnoseClusterMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getDiagnoseClusterMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getDiagnoseClusterMethod() { + return getDiagnoseClusterMethodHelper(); + } + + private static io.grpc.MethodDescriptor getDiagnoseClusterMethodHelper() { + io.grpc.MethodDescriptor getDiagnoseClusterMethod; + if ((getDiagnoseClusterMethod = ClusterControllerGrpc.getDiagnoseClusterMethod) == null) { + synchronized (ClusterControllerGrpc.class) { + if ((getDiagnoseClusterMethod = ClusterControllerGrpc.getDiagnoseClusterMethod) == null) { + ClusterControllerGrpc.getDiagnoseClusterMethod = getDiagnoseClusterMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.ClusterController", "DiagnoseCluster")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor(new ClusterControllerMethodDescriptorSupplier("DiagnoseCluster")) + .build(); + } + } + } + return getDiagnoseClusterMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static ClusterControllerStub newStub(io.grpc.Channel channel) { + return new ClusterControllerStub(channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static ClusterControllerBlockingStub newBlockingStub( + io.grpc.Channel channel) { + return new ClusterControllerBlockingStub(channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static ClusterControllerFutureStub newFutureStub( + io.grpc.Channel channel) { + return new ClusterControllerFutureStub(channel); + } + + /** + *
+   * The ClusterControllerService provides methods to manage clusters
+   * of Compute Engine instances.
+   * 
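+   * <p>Illustrative only: a server-side sketch; methods left un-overridden answer with
+   * UNIMPLEMENTED. The subclass name and response payload are hypothetical:
+   *
+   *   class MyClusterController extends ClusterControllerGrpc.ClusterControllerImplBase {
+   *     public void getCluster(GetClusterRequest request,
+   *         io.grpc.stub.StreamObserver<Cluster> responseObserver) {
+   *       responseObserver.onNext(Cluster.getDefaultInstance());  // placeholder payload
+   *       responseObserver.onCompleted();
+   *     }
+   *   }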
+ */ + public static abstract class ClusterControllerImplBase implements io.grpc.BindableService { + + /** + *
+     * Creates a cluster in a project.
+     * 
+ */ + public void createCluster(com.google.cloud.dataproc.v1beta2.CreateClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getCreateClusterMethodHelper(), responseObserver); + } + + /** + *
+     * Updates a cluster in a project.
+     * 
+ */ + public void updateCluster(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getUpdateClusterMethodHelper(), responseObserver); + } + + /** + *
+     * Deletes a cluster in a project.
+     * 
+ */ + public void deleteCluster(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getDeleteClusterMethodHelper(), responseObserver); + } + + /** + *
+     * Gets the resource representation for a cluster in a project.
+     * 
+ */ + public void getCluster(com.google.cloud.dataproc.v1beta2.GetClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getGetClusterMethodHelper(), responseObserver); + } + + /** + *
+     * Lists all regions/{region}/clusters in a project.
+     * 
+ */ + public void listClusters(com.google.cloud.dataproc.v1beta2.ListClustersRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getListClustersMethodHelper(), responseObserver); + } + + /** + *
+     * Gets cluster diagnostic information.
+     * After the operation completes, the Operation.response field
+     * contains `DiagnoseClusterOutputLocation`.
+     * 
+ */ + public void diagnoseCluster(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getDiagnoseClusterMethodHelper(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateClusterMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.CreateClusterRequest, + com.google.longrunning.Operation>( + this, METHODID_CREATE_CLUSTER))) + .addMethod( + getUpdateClusterMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest, + com.google.longrunning.Operation>( + this, METHODID_UPDATE_CLUSTER))) + .addMethod( + getDeleteClusterMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest, + com.google.longrunning.Operation>( + this, METHODID_DELETE_CLUSTER))) + .addMethod( + getGetClusterMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.GetClusterRequest, + com.google.cloud.dataproc.v1beta2.Cluster>( + this, METHODID_GET_CLUSTER))) + .addMethod( + getListClustersMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.ListClustersRequest, + com.google.cloud.dataproc.v1beta2.ListClustersResponse>( + this, METHODID_LIST_CLUSTERS))) + .addMethod( + getDiagnoseClusterMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest, + com.google.longrunning.Operation>( + this, METHODID_DIAGNOSE_CLUSTER))) + .build(); + } + } + + /** + *
+   * The ClusterControllerService provides methods to manage clusters
+   * of Compute Engine instances.
+   * 
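+   * <p>Illustrative only: a non-blocking call sketch; the observer bodies and the
+   * "my-project"/"global"/"my-cluster" values are placeholders:
+   *
+   *   stub.getCluster(
+   *       GetClusterRequest.newBuilder()
+   *           .setProjectId("my-project").setRegion("global")
+   *           .setClusterName("my-cluster").build(),
+   *       new io.grpc.stub.StreamObserver<Cluster>() {
+   *         public void onNext(Cluster cluster) { /* cluster received */ }
+   *         public void onError(Throwable t) { /* RPC failed */ }
+   *         public void onCompleted() { /* call finished */ }
+   *       });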
+ */ + public static final class ClusterControllerStub extends io.grpc.stub.AbstractStub { + private ClusterControllerStub(io.grpc.Channel channel) { + super(channel); + } + + private ClusterControllerStub(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ClusterControllerStub build(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + return new ClusterControllerStub(channel, callOptions); + } + + /** + *
+     * Creates a cluster in a project.
+     * 
+ */ + public void createCluster(com.google.cloud.dataproc.v1beta2.CreateClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getCreateClusterMethodHelper(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Updates a cluster in a project.
+     * 
+ */ + public void updateCluster(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getUpdateClusterMethodHelper(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Deletes a cluster in a project.
+     * 
+ */ + public void deleteCluster(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getDeleteClusterMethodHelper(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Gets the resource representation for a cluster in a project.
+     * 
+ */ + public void getCluster(com.google.cloud.dataproc.v1beta2.GetClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getGetClusterMethodHelper(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Lists all regions/{region}/clusters in a project.
+     * 
+ */ + public void listClusters(com.google.cloud.dataproc.v1beta2.ListClustersRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getListClustersMethodHelper(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Gets cluster diagnostic information.
+     * After the operation completes, the Operation.response field
+     * contains `DiagnoseClusterOutputLocation`.
+     * 
+ */ + public void diagnoseCluster(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getDiagnoseClusterMethodHelper(), getCallOptions()), request, responseObserver); + } + } + + /** + *
+   * The ClusterControllerService provides methods to manage clusters
+   * of Compute Engine instances.
+   * 
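+   * <p>Illustrative only: a synchronous create sketch ("my-project", "global" and
+   * "my-cluster" are placeholder values):
+   *
+   *   com.google.longrunning.Operation op = stub.createCluster(
+   *       CreateClusterRequest.newBuilder()
+   *           .setProjectId("my-project")
+   *           .setRegion("global")
+   *           .setCluster(Cluster.newBuilder().setClusterName("my-cluster"))
+   *           .build());
+   *   // op is a long-running operation handle; its completion is tracked separately.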
+ */ + public static final class ClusterControllerBlockingStub extends io.grpc.stub.AbstractStub { + private ClusterControllerBlockingStub(io.grpc.Channel channel) { + super(channel); + } + + private ClusterControllerBlockingStub(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ClusterControllerBlockingStub build(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + return new ClusterControllerBlockingStub(channel, callOptions); + } + + /** + *
+     * Creates a cluster in a project.
+     * 
+ */ + public com.google.longrunning.Operation createCluster(com.google.cloud.dataproc.v1beta2.CreateClusterRequest request) { + return blockingUnaryCall( + getChannel(), getCreateClusterMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Updates a cluster in a project.
+     * 
+ */ + public com.google.longrunning.Operation updateCluster(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest request) { + return blockingUnaryCall( + getChannel(), getUpdateClusterMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Deletes a cluster in a project.
+     * 
+ */ + public com.google.longrunning.Operation deleteCluster(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest request) { + return blockingUnaryCall( + getChannel(), getDeleteClusterMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Gets the resource representation for a cluster in a project.
+     * 
+ */ + public com.google.cloud.dataproc.v1beta2.Cluster getCluster(com.google.cloud.dataproc.v1beta2.GetClusterRequest request) { + return blockingUnaryCall( + getChannel(), getGetClusterMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Lists all regions/{region}/clusters in a project.
+     * 
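+     * <p>Illustrative only: paging through all clusters with the page token (field
+     * names follow ListClustersRequest/ListClustersResponse in this patch; the project
+     * and region values are placeholders):
+     *
+     *   String pageToken = "";
+     *   do {
+     *     ListClustersResponse page = stub.listClusters(
+     *         ListClustersRequest.newBuilder()
+     *             .setProjectId("my-project").setRegion("global")
+     *             .setPageToken(pageToken).build());
+     *     // ... consume page.getClustersList() ...
+     *     pageToken = page.getNextPageToken();
+     *   } while (!pageToken.isEmpty());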
+ */ + public com.google.cloud.dataproc.v1beta2.ListClustersResponse listClusters(com.google.cloud.dataproc.v1beta2.ListClustersRequest request) { + return blockingUnaryCall( + getChannel(), getListClustersMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Gets cluster diagnostic information.
+     * After the operation completes, the Operation.response field
+     * contains `DiagnoseClusterOutputLocation`.
+     * 
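+     * <p>Illustrative only: the returned Operation is a handle that must still reach
+     * completion (for example via the Operations API) before its response can be read.
+     * The unpack target below assumes the DiagnoseClusterResults message generated in
+     * this patch, and the request values are placeholders:
+     *
+     *   com.google.longrunning.Operation op = stub.diagnoseCluster(
+     *       DiagnoseClusterRequest.newBuilder()
+     *           .setProjectId("my-project").setRegion("global")
+     *           .setClusterName("my-cluster").build());
+     *   if (op.getDone() && op.hasResponse()) {
+     *     // unpack may throw InvalidProtocolBufferException if the payload differs
+     *     DiagnoseClusterResults results =
+     *         op.getResponse().unpack(DiagnoseClusterResults.class);
+     *   }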
+ */ + public com.google.longrunning.Operation diagnoseCluster(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest request) { + return blockingUnaryCall( + getChannel(), getDiagnoseClusterMethodHelper(), getCallOptions(), request); + } + } + + /** + *
+   * The ClusterControllerService provides methods to manage clusters
+   * of Compute Engine instances.
+   * 
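+   * <p>Illustrative only: attaching a callback to the returned future; the executor
+   * choice, callback bodies, and request values are assumptions:
+   *
+   *   com.google.common.util.concurrent.Futures.addCallback(
+   *       futureStub.getCluster(GetClusterRequest.newBuilder()
+   *           .setProjectId("my-project").setRegion("global")
+   *           .setClusterName("my-cluster").build()),
+   *       new com.google.common.util.concurrent.FutureCallback<Cluster>() {
+   *         public void onSuccess(Cluster cluster) { /* use cluster */ }
+   *         public void onFailure(Throwable t) { /* handle error */ }
+   *       },
+   *       com.google.common.util.concurrent.MoreExecutors.directExecutor());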
+ */ + public static final class ClusterControllerFutureStub extends io.grpc.stub.AbstractStub { + private ClusterControllerFutureStub(io.grpc.Channel channel) { + super(channel); + } + + private ClusterControllerFutureStub(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected ClusterControllerFutureStub build(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + return new ClusterControllerFutureStub(channel, callOptions); + } + + /** + *
+     * Creates a cluster in a project.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture createCluster( + com.google.cloud.dataproc.v1beta2.CreateClusterRequest request) { + return futureUnaryCall( + getChannel().newCall(getCreateClusterMethodHelper(), getCallOptions()), request); + } + + /** + *
+     * Updates a cluster in a project.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture updateCluster( + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest request) { + return futureUnaryCall( + getChannel().newCall(getUpdateClusterMethodHelper(), getCallOptions()), request); + } + + /** + *
+     * Deletes a cluster in a project.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture deleteCluster( + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest request) { + return futureUnaryCall( + getChannel().newCall(getDeleteClusterMethodHelper(), getCallOptions()), request); + } + + /** + *
+     * Gets the resource representation for a cluster in a project.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture getCluster( + com.google.cloud.dataproc.v1beta2.GetClusterRequest request) { + return futureUnaryCall( + getChannel().newCall(getGetClusterMethodHelper(), getCallOptions()), request); + } + + /** + *
+     * Lists all regions/{region}/clusters in a project.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture listClusters( + com.google.cloud.dataproc.v1beta2.ListClustersRequest request) { + return futureUnaryCall( + getChannel().newCall(getListClustersMethodHelper(), getCallOptions()), request); + } + + /** + *
+     * Gets cluster diagnostic information.
+     * After the operation completes, the Operation.response field
+     * contains `DiagnoseClusterOutputLocation`.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture diagnoseCluster( + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest request) { + return futureUnaryCall( + getChannel().newCall(getDiagnoseClusterMethodHelper(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_CLUSTER = 0; + private static final int METHODID_UPDATE_CLUSTER = 1; + private static final int METHODID_DELETE_CLUSTER = 2; + private static final int METHODID_GET_CLUSTER = 3; + private static final int METHODID_LIST_CLUSTERS = 4; + private static final int METHODID_DIAGNOSE_CLUSTER = 5; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final ClusterControllerImplBase serviceImpl; + private final int methodId; + + MethodHandlers(ClusterControllerImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_CLUSTER: + serviceImpl.createCluster((com.google.cloud.dataproc.v1beta2.CreateClusterRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_UPDATE_CLUSTER: + serviceImpl.updateCluster((com.google.cloud.dataproc.v1beta2.UpdateClusterRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DELETE_CLUSTER: + serviceImpl.deleteCluster((com.google.cloud.dataproc.v1beta2.DeleteClusterRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_CLUSTER: + serviceImpl.getCluster((com.google.cloud.dataproc.v1beta2.GetClusterRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_CLUSTERS: + serviceImpl.listClusters((com.google.cloud.dataproc.v1beta2.ListClustersRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DIAGNOSE_CLUSTER: + serviceImpl.diagnoseCluster((com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private static abstract class ClusterControllerBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + ClusterControllerBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("ClusterController"); + } + } + + private static final class ClusterControllerFileDescriptorSupplier + extends ClusterControllerBaseDescriptorSupplier { + ClusterControllerFileDescriptorSupplier() {} + } + + private static final class ClusterControllerMethodDescriptorSupplier + extends ClusterControllerBaseDescriptorSupplier + implements 
io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + ClusterControllerMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (ClusterControllerGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new ClusterControllerFileDescriptorSupplier()) + .addMethod(getCreateClusterMethodHelper()) + .addMethod(getUpdateClusterMethodHelper()) + .addMethod(getDeleteClusterMethodHelper()) + .addMethod(getGetClusterMethodHelper()) + .addMethod(getListClustersMethodHelper()) + .addMethod(getDiagnoseClusterMethodHelper()) + .build(); + } + } + } + return result; + } +} diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java new file mode 100644 index 000000000000..f2933105497a --- /dev/null +++ b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java @@ -0,0 +1,789 @@ +package com.google.cloud.dataproc.v1beta2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; +import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ClientCalls.asyncClientStreamingCall; +import static io.grpc.stub.ClientCalls.asyncServerStreamingCall; +import static io.grpc.stub.ClientCalls.asyncUnaryCall; +import static io.grpc.stub.ClientCalls.blockingServerStreamingCall; +import static io.grpc.stub.ClientCalls.blockingUnaryCall; +import static io.grpc.stub.ClientCalls.futureUnaryCall; +import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ServerCalls.asyncClientStreamingCall; +import static io.grpc.stub.ServerCalls.asyncServerStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnaryCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; + +/** + *
+ * The JobController provides methods to manage jobs.
+ * 
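+ *
+ * A minimal blocking-client sketch (editor's illustration, not generated
+ * output; credentials setup is omitted and the project/region/job IDs are
+ * placeholders):
+ *
+ *   io.grpc.ManagedChannel channel = io.grpc.ManagedChannelBuilder
+ *       .forAddress("dataproc.googleapis.com", 443).build();
+ *   JobControllerGrpc.JobControllerBlockingStub stub =
+ *       JobControllerGrpc.newBlockingStub(channel);
+ *   Job job = stub.getJob(GetJobRequest.newBuilder()
+ *       .setProjectId("my-project").setRegion("global").setJobId("my-job").build());
+ *   channel.shutdown();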
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.10.0)", + comments = "Source: google/cloud/dataproc/v1beta2/jobs.proto") +public final class JobControllerGrpc { + + private JobControllerGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.dataproc.v1beta2.JobController"; + + // Static method descriptors that strictly reflect the proto. + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getSubmitJobMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_SUBMIT_JOB = getSubmitJobMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getSubmitJobMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getSubmitJobMethod() { + return getSubmitJobMethodHelper(); + } + + private static io.grpc.MethodDescriptor getSubmitJobMethodHelper() { + io.grpc.MethodDescriptor getSubmitJobMethod; + if ((getSubmitJobMethod = JobControllerGrpc.getSubmitJobMethod) == null) { + synchronized (JobControllerGrpc.class) { + if ((getSubmitJobMethod = JobControllerGrpc.getSubmitJobMethod) == null) { + JobControllerGrpc.getSubmitJobMethod = getSubmitJobMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.JobController", "SubmitJob")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.SubmitJobRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance())) + .setSchemaDescriptor(new JobControllerMethodDescriptorSupplier("SubmitJob")) + .build(); + } + } + } + return getSubmitJobMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getGetJobMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_GET_JOB = getGetJobMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getGetJobMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getGetJobMethod() { + return getGetJobMethodHelper(); + } + + private static io.grpc.MethodDescriptor getGetJobMethodHelper() { + io.grpc.MethodDescriptor getGetJobMethod; + if ((getGetJobMethod = JobControllerGrpc.getGetJobMethod) == null) { + synchronized (JobControllerGrpc.class) { + if ((getGetJobMethod = JobControllerGrpc.getGetJobMethod) == null) { + JobControllerGrpc.getGetJobMethod = getGetJobMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.JobController", "GetJob")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.GetJobRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance())) + .setSchemaDescriptor(new JobControllerMethodDescriptorSupplier("GetJob")) + .build(); + } + } + } + return getGetJobMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getListJobsMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_LIST_JOBS = getListJobsMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getListJobsMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getListJobsMethod() { + return getListJobsMethodHelper(); + } + + private static io.grpc.MethodDescriptor getListJobsMethodHelper() { + io.grpc.MethodDescriptor getListJobsMethod; + if ((getListJobsMethod = JobControllerGrpc.getListJobsMethod) == null) { + synchronized (JobControllerGrpc.class) { + if ((getListJobsMethod = JobControllerGrpc.getListJobsMethod) == null) { + JobControllerGrpc.getListJobsMethod = getListJobsMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.JobController", "ListJobs")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.ListJobsRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.ListJobsResponse.getDefaultInstance())) + .setSchemaDescriptor(new JobControllerMethodDescriptorSupplier("ListJobs")) + .build(); + } + } + } + return getListJobsMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getUpdateJobMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_UPDATE_JOB = getUpdateJobMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getUpdateJobMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getUpdateJobMethod() { + return getUpdateJobMethodHelper(); + } + + private static io.grpc.MethodDescriptor getUpdateJobMethodHelper() { + io.grpc.MethodDescriptor getUpdateJobMethod; + if ((getUpdateJobMethod = JobControllerGrpc.getUpdateJobMethod) == null) { + synchronized (JobControllerGrpc.class) { + if ((getUpdateJobMethod = JobControllerGrpc.getUpdateJobMethod) == null) { + JobControllerGrpc.getUpdateJobMethod = getUpdateJobMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.JobController", "UpdateJob")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.UpdateJobRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance())) + .setSchemaDescriptor(new JobControllerMethodDescriptorSupplier("UpdateJob")) + .build(); + } + } + } + return getUpdateJobMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getCancelJobMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_CANCEL_JOB = getCancelJobMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getCancelJobMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getCancelJobMethod() { + return getCancelJobMethodHelper(); + } + + private static io.grpc.MethodDescriptor getCancelJobMethodHelper() { + io.grpc.MethodDescriptor getCancelJobMethod; + if ((getCancelJobMethod = JobControllerGrpc.getCancelJobMethod) == null) { + synchronized (JobControllerGrpc.class) { + if ((getCancelJobMethod = JobControllerGrpc.getCancelJobMethod) == null) { + JobControllerGrpc.getCancelJobMethod = getCancelJobMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.JobController", "CancelJob")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.CancelJobRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance())) + .setSchemaDescriptor(new JobControllerMethodDescriptorSupplier("CancelJob")) + .build(); + } + } + } + return getCancelJobMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getDeleteJobMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_DELETE_JOB = getDeleteJobMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getDeleteJobMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getDeleteJobMethod() { + return getDeleteJobMethodHelper(); + } + + private static io.grpc.MethodDescriptor getDeleteJobMethodHelper() { + io.grpc.MethodDescriptor getDeleteJobMethod; + if ((getDeleteJobMethod = JobControllerGrpc.getDeleteJobMethod) == null) { + synchronized (JobControllerGrpc.class) { + if ((getDeleteJobMethod = JobControllerGrpc.getDeleteJobMethod) == null) { + JobControllerGrpc.getDeleteJobMethod = getDeleteJobMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.JobController", "DeleteJob")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.DeleteJobRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new JobControllerMethodDescriptorSupplier("DeleteJob")) + .build(); + } + } + } + return getDeleteJobMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static JobControllerStub newStub(io.grpc.Channel channel) { + return new JobControllerStub(channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static JobControllerBlockingStub newBlockingStub( + io.grpc.Channel channel) { + return new JobControllerBlockingStub(channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static JobControllerFutureStub newFutureStub( + io.grpc.Channel channel) { + return new 
JobControllerFutureStub(channel); + } + + /** + *
+   * The JobController provides methods to manage jobs.
+   * 
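+   *
+   * A hypothetical server-side sketch (editor's illustration): extend the base
+   * class, override the methods you support, and bind the service; the port is
+   * a placeholder and error handling is omitted:
+   *
+   *   public final class MyJobController extends JobControllerGrpc.JobControllerImplBase {
+   *     public void getJob(GetJobRequest request,
+   *         io.grpc.stub.StreamObserver<Job> responseObserver) {
+   *       responseObserver.onNext(Job.getDefaultInstance());
+   *       responseObserver.onCompleted();
+   *     }
+   *   }
+   *   io.grpc.Server server = io.grpc.ServerBuilder.forPort(8080)
+   *       .addService(new MyJobController()).build().start();   // throws IOException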
+ */ + public static abstract class JobControllerImplBase implements io.grpc.BindableService { + + /** + *
+     * Submits a job to a cluster.
+     * 
+     */
+    public void submitJob(com.google.cloud.dataproc.v1beta2.SubmitJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnimplementedUnaryCall(getSubmitJobMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Gets the resource representation for a job in a project.
+     * 
+     */
+    public void getJob(com.google.cloud.dataproc.v1beta2.GetJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnimplementedUnaryCall(getGetJobMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Lists regions/{region}/jobs in a project.
+     * 
+     */
+    public void listJobs(com.google.cloud.dataproc.v1beta2.ListJobsRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.ListJobsResponse> responseObserver) {
+      asyncUnimplementedUnaryCall(getListJobsMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Updates a job in a project.
+     * 
+     */
+    public void updateJob(com.google.cloud.dataproc.v1beta2.UpdateJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnimplementedUnaryCall(getUpdateJobMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Starts a job cancellation request. To access the job resource
+     * after cancellation, call
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+     * 
+     */
+    public void cancelJob(com.google.cloud.dataproc.v1beta2.CancelJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnimplementedUnaryCall(getCancelJobMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Deletes the job from the project. If the job is active, the delete fails,
+     * and the response returns `FAILED_PRECONDITION`.
+     * 
+ */ + public void deleteJob(com.google.cloud.dataproc.v1beta2.DeleteJobRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getDeleteJobMethodHelper(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getSubmitJobMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.SubmitJobRequest, + com.google.cloud.dataproc.v1beta2.Job>( + this, METHODID_SUBMIT_JOB))) + .addMethod( + getGetJobMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.GetJobRequest, + com.google.cloud.dataproc.v1beta2.Job>( + this, METHODID_GET_JOB))) + .addMethod( + getListJobsMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.ListJobsRequest, + com.google.cloud.dataproc.v1beta2.ListJobsResponse>( + this, METHODID_LIST_JOBS))) + .addMethod( + getUpdateJobMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.UpdateJobRequest, + com.google.cloud.dataproc.v1beta2.Job>( + this, METHODID_UPDATE_JOB))) + .addMethod( + getCancelJobMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.CancelJobRequest, + com.google.cloud.dataproc.v1beta2.Job>( + this, METHODID_CANCEL_JOB))) + .addMethod( + getDeleteJobMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.DeleteJobRequest, + com.google.protobuf.Empty>( + this, METHODID_DELETE_JOB))) + .build(); + } + } + + /** + *
+   * The JobController provides methods to manage jobs.
+   * 
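+   *
+   * Sketch of an asynchronous call (editor's illustration; assumes a channel and
+   * request built as in the class-level example):
+   *
+   *   JobControllerGrpc.newStub(channel).submitJob(request,
+   *       new io.grpc.stub.StreamObserver<Job>() {
+   *         public void onNext(Job job) { }        // job accepted by the service
+   *         public void onError(Throwable t) { }   // RPC failed
+   *         public void onCompleted() { }
+   *       });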
+   */
+  public static final class JobControllerStub extends io.grpc.stub.AbstractStub<JobControllerStub> {
+    private JobControllerStub(io.grpc.Channel channel) {
+      super(channel);
+    }
+
+    private JobControllerStub(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected JobControllerStub build(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      return new JobControllerStub(channel, callOptions);
+    }
+
+    /**
+     *
+     * Submits a job to a cluster.
+     * 
+     */
+    public void submitJob(com.google.cloud.dataproc.v1beta2.SubmitJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getSubmitJobMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Gets the resource representation for a job in a project.
+     * 
+     */
+    public void getJob(com.google.cloud.dataproc.v1beta2.GetJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getGetJobMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Lists regions/{region}/jobs in a project.
+     * 
+     */
+    public void listJobs(com.google.cloud.dataproc.v1beta2.ListJobsRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.ListJobsResponse> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getListJobsMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Updates a job in a project.
+     * 
+     */
+    public void updateJob(com.google.cloud.dataproc.v1beta2.UpdateJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getUpdateJobMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Starts a job cancellation request. To access the job resource
+     * after cancellation, call
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+     * 
+     */
+    public void cancelJob(com.google.cloud.dataproc.v1beta2.CancelJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getCancelJobMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Deletes the job from the project. If the job is active, the delete fails,
+     * and the response returns `FAILED_PRECONDITION`.
+     * 
+     */
+    public void deleteJob(com.google.cloud.dataproc.v1beta2.DeleteJobRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getDeleteJobMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+  }
+
+  /**
+   *
+   * The JobController provides methods to manage jobs.
+   * 
+   */
+  public static final class JobControllerBlockingStub extends io.grpc.stub.AbstractStub<JobControllerBlockingStub> {
+    private JobControllerBlockingStub(io.grpc.Channel channel) {
+      super(channel);
+    }
+
+    private JobControllerBlockingStub(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected JobControllerBlockingStub build(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      return new JobControllerBlockingStub(channel, callOptions);
+    }
+
+    /**
+     *
+     * Submits a job to a cluster.
+     * 
+ */ + public com.google.cloud.dataproc.v1beta2.Job submitJob(com.google.cloud.dataproc.v1beta2.SubmitJobRequest request) { + return blockingUnaryCall( + getChannel(), getSubmitJobMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Gets the resource representation for a job in a project.
+     * 
+ */ + public com.google.cloud.dataproc.v1beta2.Job getJob(com.google.cloud.dataproc.v1beta2.GetJobRequest request) { + return blockingUnaryCall( + getChannel(), getGetJobMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Lists regions/{region}/jobs in a project.
+     * 
+ */ + public com.google.cloud.dataproc.v1beta2.ListJobsResponse listJobs(com.google.cloud.dataproc.v1beta2.ListJobsRequest request) { + return blockingUnaryCall( + getChannel(), getListJobsMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Updates a job in a project.
+     * 
+ */ + public com.google.cloud.dataproc.v1beta2.Job updateJob(com.google.cloud.dataproc.v1beta2.UpdateJobRequest request) { + return blockingUnaryCall( + getChannel(), getUpdateJobMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Starts a job cancellation request. To access the job resource
+     * after cancellation, call
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+     * 
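+     *
+     * An illustrative blocking-stub flow (editor's sketch; the stub variable and
+     * all IDs are hypothetical placeholders):
+     *
+     *   stub.cancelJob(CancelJobRequest.newBuilder()
+     *       .setProjectId("my-project").setRegion("global").setJobId("my-job").build());
+     *   Job cancelled = stub.getJob(GetJobRequest.newBuilder()
+     *       .setProjectId("my-project").setRegion("global").setJobId("my-job").build());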
+ */ + public com.google.cloud.dataproc.v1beta2.Job cancelJob(com.google.cloud.dataproc.v1beta2.CancelJobRequest request) { + return blockingUnaryCall( + getChannel(), getCancelJobMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Deletes the job from the project. If the job is active, the delete fails,
+     * and the response returns `FAILED_PRECONDITION`.
+     * 
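+     *
+     * On the client, the failure surfaces as a StatusRuntimeException; a sketch
+     * (assumes an existing blocking stub and request):
+     *
+     *   try {
+     *     stub.deleteJob(request);
+     *   } catch (io.grpc.StatusRuntimeException e) {
+     *     if (e.getStatus().getCode() == io.grpc.Status.Code.FAILED_PRECONDITION) {
+     *       // job is still active; cancel it before deleting
+     *     }
+     *   }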
+ */ + public com.google.protobuf.Empty deleteJob(com.google.cloud.dataproc.v1beta2.DeleteJobRequest request) { + return blockingUnaryCall( + getChannel(), getDeleteJobMethodHelper(), getCallOptions(), request); + } + } + + /** + *
+   * The JobController provides methods to manage jobs.
+   * 
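+   *
+   * Sketch of a future-style call (editor's illustration; get() blocks and its
+   * checked exceptions are omitted):
+   *
+   *   com.google.common.util.concurrent.ListenableFuture<Job> pending =
+   *       JobControllerGrpc.newFutureStub(channel).submitJob(request);
+   *   Job job = pending.get();   // or register a listener instead of blocking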
+   */
+  public static final class JobControllerFutureStub extends io.grpc.stub.AbstractStub<JobControllerFutureStub> {
+    private JobControllerFutureStub(io.grpc.Channel channel) {
+      super(channel);
+    }
+
+    private JobControllerFutureStub(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected JobControllerFutureStub build(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      return new JobControllerFutureStub(channel, callOptions);
+    }
+
+    /**
+     *
+     * Submits a job to a cluster.
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.Job> submitJob(
+        com.google.cloud.dataproc.v1beta2.SubmitJobRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getSubmitJobMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     *
+     * Gets the resource representation for a job in a project.
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.Job> getJob(
+        com.google.cloud.dataproc.v1beta2.GetJobRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getGetJobMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     *
+     * Lists regions/{region}/jobs in a project.
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.ListJobsResponse> listJobs(
+        com.google.cloud.dataproc.v1beta2.ListJobsRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getListJobsMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     *
+     * Updates a job in a project.
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.Job> updateJob(
+        com.google.cloud.dataproc.v1beta2.UpdateJobRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getUpdateJobMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     *
+     * Starts a job cancellation request. To access the job resource
+     * after cancellation, call
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.Job> cancelJob(
+        com.google.cloud.dataproc.v1beta2.CancelJobRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getCancelJobMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     *
+     * Deletes the job from the project. If the job is active, the delete fails,
+     * and the response returns `FAILED_PRECONDITION`.
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty> deleteJob(
+        com.google.cloud.dataproc.v1beta2.DeleteJobRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getDeleteJobMethodHelper(), getCallOptions()), request);
+    }
+  }
+
+  private static final int METHODID_SUBMIT_JOB = 0;
+  private static final int METHODID_GET_JOB = 1;
+  private static final int METHODID_LIST_JOBS = 2;
+  private static final int METHODID_UPDATE_JOB = 3;
+  private static final int METHODID_CANCEL_JOB = 4;
+  private static final int METHODID_DELETE_JOB = 5;
+
+  private static final class MethodHandlers<Req, Resp> implements
+      io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
+      io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
+      io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
+      io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
+    private final JobControllerImplBase serviceImpl;
+    private final int methodId;
+
+    MethodHandlers(JobControllerImplBase serviceImpl, int methodId) {
+      this.serviceImpl = serviceImpl;
+      this.methodId = methodId;
+    }
+
+    @java.lang.Override
+    @java.lang.SuppressWarnings("unchecked")
+    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
+      switch (methodId) {
+        case METHODID_SUBMIT_JOB:
+          serviceImpl.submitJob((com.google.cloud.dataproc.v1beta2.SubmitJobRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job>) responseObserver);
+          break;
+        case METHODID_GET_JOB:
+          serviceImpl.getJob((com.google.cloud.dataproc.v1beta2.GetJobRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job>) responseObserver);
+          break;
+        case METHODID_LIST_JOBS:
+          serviceImpl.listJobs((com.google.cloud.dataproc.v1beta2.ListJobsRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.ListJobsResponse>) responseObserver);
+          break;
+        case METHODID_UPDATE_JOB:
+          serviceImpl.updateJob((com.google.cloud.dataproc.v1beta2.UpdateJobRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job>) responseObserver);
+          break;
+        case METHODID_CANCEL_JOB:
+          serviceImpl.cancelJob((com.google.cloud.dataproc.v1beta2.CancelJobRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.Job>) responseObserver);
+          break;
+        case METHODID_DELETE_JOB:
+          serviceImpl.deleteJob((com.google.cloud.dataproc.v1beta2.DeleteJobRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.protobuf.Empty>) responseObserver);
+          break;
+        default:
+          throw new AssertionError();
+      }
+    }
+
+    @java.lang.Override
+    @java.lang.SuppressWarnings("unchecked")
+    public io.grpc.stub.StreamObserver<Req> invoke(
+        io.grpc.stub.StreamObserver<Resp> responseObserver) {
+      switch (methodId) {
+        default:
+          throw new AssertionError();
+      }
+    }
+  }
+
+  private static abstract class JobControllerBaseDescriptorSupplier
+      implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier {
+    JobControllerBaseDescriptorSupplier() {}
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
+      return com.google.cloud.dataproc.v1beta2.JobsProto.getDescriptor();
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
+      return getFileDescriptor().findServiceByName("JobController");
+    }
+  }
+
+  private static final class JobControllerFileDescriptorSupplier
+      extends JobControllerBaseDescriptorSupplier {
+    JobControllerFileDescriptorSupplier() {}
+  }
+
+  private static final class JobControllerMethodDescriptorSupplier
+      extends JobControllerBaseDescriptorSupplier
+      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
+    private final String methodName;
+
+    JobControllerMethodDescriptorSupplier(String methodName) {
+      this.methodName =
methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (JobControllerGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new JobControllerFileDescriptorSupplier()) + .addMethod(getSubmitJobMethodHelper()) + .addMethod(getGetJobMethodHelper()) + .addMethod(getListJobsMethodHelper()) + .addMethod(getUpdateJobMethodHelper()) + .addMethod(getCancelJobMethodHelper()) + .addMethod(getDeleteJobMethodHelper()) + .build(); + } + } + } + return result; + } +} diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java new file mode 100644 index 000000000000..0841db97b201 --- /dev/null +++ b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java @@ -0,0 +1,998 @@ +package com.google.cloud.dataproc.v1beta2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; +import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ClientCalls.asyncClientStreamingCall; +import static io.grpc.stub.ClientCalls.asyncServerStreamingCall; +import static io.grpc.stub.ClientCalls.asyncUnaryCall; +import static io.grpc.stub.ClientCalls.blockingServerStreamingCall; +import static io.grpc.stub.ClientCalls.blockingUnaryCall; +import static io.grpc.stub.ClientCalls.futureUnaryCall; +import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ServerCalls.asyncClientStreamingCall; +import static io.grpc.stub.ServerCalls.asyncServerStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnaryCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; + +/** + *
+ * The API interface for managing Workflow Templates in the
+ * Cloud Dataproc API.
+ * 
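+ *
+ * A minimal client sketch (editor's illustration; channel setup and credentials
+ * are omitted, and the parent resource name is a placeholder):
+ *
+ *   WorkflowTemplateServiceGrpc.WorkflowTemplateServiceBlockingStub stub =
+ *       WorkflowTemplateServiceGrpc.newBlockingStub(channel);
+ *   ListWorkflowTemplatesResponse templates = stub.listWorkflowTemplates(
+ *       ListWorkflowTemplatesRequest.newBuilder()
+ *           .setParent("projects/my-project/regions/global").build());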
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.10.0)", + comments = "Source: google/cloud/dataproc/v1beta2/workflow_templates.proto") +public final class WorkflowTemplateServiceGrpc { + + private WorkflowTemplateServiceGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.dataproc.v1beta2.WorkflowTemplateService"; + + // Static method descriptors that strictly reflect the proto. + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getCreateWorkflowTemplateMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_CREATE_WORKFLOW_TEMPLATE = getCreateWorkflowTemplateMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getCreateWorkflowTemplateMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getCreateWorkflowTemplateMethod() { + return getCreateWorkflowTemplateMethodHelper(); + } + + private static io.grpc.MethodDescriptor getCreateWorkflowTemplateMethodHelper() { + io.grpc.MethodDescriptor getCreateWorkflowTemplateMethod; + if ((getCreateWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getCreateWorkflowTemplateMethod) == null) { + synchronized (WorkflowTemplateServiceGrpc.class) { + if ((getCreateWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getCreateWorkflowTemplateMethod) == null) { + WorkflowTemplateServiceGrpc.getCreateWorkflowTemplateMethod = getCreateWorkflowTemplateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService", "CreateWorkflowTemplate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance())) + .setSchemaDescriptor(new WorkflowTemplateServiceMethodDescriptorSupplier("CreateWorkflowTemplate")) + .build(); + } + } + } + return getCreateWorkflowTemplateMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getGetWorkflowTemplateMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_GET_WORKFLOW_TEMPLATE = getGetWorkflowTemplateMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getGetWorkflowTemplateMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getGetWorkflowTemplateMethod() { + return getGetWorkflowTemplateMethodHelper(); + } + + private static io.grpc.MethodDescriptor getGetWorkflowTemplateMethodHelper() { + io.grpc.MethodDescriptor getGetWorkflowTemplateMethod; + if ((getGetWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getGetWorkflowTemplateMethod) == null) { + synchronized (WorkflowTemplateServiceGrpc.class) { + if ((getGetWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getGetWorkflowTemplateMethod) == null) { + WorkflowTemplateServiceGrpc.getGetWorkflowTemplateMethod = getGetWorkflowTemplateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService", "GetWorkflowTemplate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance())) + .setSchemaDescriptor(new WorkflowTemplateServiceMethodDescriptorSupplier("GetWorkflowTemplate")) + .build(); + } + } + } + return getGetWorkflowTemplateMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getInstantiateWorkflowTemplateMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_INSTANTIATE_WORKFLOW_TEMPLATE = getInstantiateWorkflowTemplateMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getInstantiateWorkflowTemplateMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getInstantiateWorkflowTemplateMethod() { + return getInstantiateWorkflowTemplateMethodHelper(); + } + + private static io.grpc.MethodDescriptor getInstantiateWorkflowTemplateMethodHelper() { + io.grpc.MethodDescriptor getInstantiateWorkflowTemplateMethod; + if ((getInstantiateWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getInstantiateWorkflowTemplateMethod) == null) { + synchronized (WorkflowTemplateServiceGrpc.class) { + if ((getInstantiateWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getInstantiateWorkflowTemplateMethod) == null) { + WorkflowTemplateServiceGrpc.getInstantiateWorkflowTemplateMethod = getInstantiateWorkflowTemplateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService", "InstantiateWorkflowTemplate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor(new WorkflowTemplateServiceMethodDescriptorSupplier("InstantiateWorkflowTemplate")) + .build(); + } + } + } + return getInstantiateWorkflowTemplateMethod; + } + 
@io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getInstantiateInlineWorkflowTemplateMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_INSTANTIATE_INLINE_WORKFLOW_TEMPLATE = getInstantiateInlineWorkflowTemplateMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getInstantiateInlineWorkflowTemplateMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getInstantiateInlineWorkflowTemplateMethod() { + return getInstantiateInlineWorkflowTemplateMethodHelper(); + } + + private static io.grpc.MethodDescriptor getInstantiateInlineWorkflowTemplateMethodHelper() { + io.grpc.MethodDescriptor getInstantiateInlineWorkflowTemplateMethod; + if ((getInstantiateInlineWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getInstantiateInlineWorkflowTemplateMethod) == null) { + synchronized (WorkflowTemplateServiceGrpc.class) { + if ((getInstantiateInlineWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getInstantiateInlineWorkflowTemplateMethod) == null) { + WorkflowTemplateServiceGrpc.getInstantiateInlineWorkflowTemplateMethod = getInstantiateInlineWorkflowTemplateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService", "InstantiateInlineWorkflowTemplate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor(new WorkflowTemplateServiceMethodDescriptorSupplier("InstantiateInlineWorkflowTemplate")) + .build(); + } + } + } + return getInstantiateInlineWorkflowTemplateMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getUpdateWorkflowTemplateMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_UPDATE_WORKFLOW_TEMPLATE = getUpdateWorkflowTemplateMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getUpdateWorkflowTemplateMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getUpdateWorkflowTemplateMethod() { + return getUpdateWorkflowTemplateMethodHelper(); + } + + private static io.grpc.MethodDescriptor getUpdateWorkflowTemplateMethodHelper() { + io.grpc.MethodDescriptor getUpdateWorkflowTemplateMethod; + if ((getUpdateWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getUpdateWorkflowTemplateMethod) == null) { + synchronized (WorkflowTemplateServiceGrpc.class) { + if ((getUpdateWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getUpdateWorkflowTemplateMethod) == null) { + WorkflowTemplateServiceGrpc.getUpdateWorkflowTemplateMethod = getUpdateWorkflowTemplateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService", "UpdateWorkflowTemplate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance())) + .setSchemaDescriptor(new WorkflowTemplateServiceMethodDescriptorSupplier("UpdateWorkflowTemplate")) + .build(); + } + } + } + return getUpdateWorkflowTemplateMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getListWorkflowTemplatesMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_LIST_WORKFLOW_TEMPLATES = getListWorkflowTemplatesMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getListWorkflowTemplatesMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getListWorkflowTemplatesMethod() { + return getListWorkflowTemplatesMethodHelper(); + } + + private static io.grpc.MethodDescriptor getListWorkflowTemplatesMethodHelper() { + io.grpc.MethodDescriptor getListWorkflowTemplatesMethod; + if ((getListWorkflowTemplatesMethod = WorkflowTemplateServiceGrpc.getListWorkflowTemplatesMethod) == null) { + synchronized (WorkflowTemplateServiceGrpc.class) { + if ((getListWorkflowTemplatesMethod = WorkflowTemplateServiceGrpc.getListWorkflowTemplatesMethod) == null) { + WorkflowTemplateServiceGrpc.getListWorkflowTemplatesMethod = getListWorkflowTemplatesMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService", "ListWorkflowTemplates")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.getDefaultInstance())) + .setSchemaDescriptor(new WorkflowTemplateServiceMethodDescriptorSupplier("ListWorkflowTemplates")) + .build(); + } + } + } + return getListWorkflowTemplatesMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getDeleteWorkflowTemplateMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_DELETE_WORKFLOW_TEMPLATE = getDeleteWorkflowTemplateMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getDeleteWorkflowTemplateMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getDeleteWorkflowTemplateMethod() { + return getDeleteWorkflowTemplateMethodHelper(); + } + + private static io.grpc.MethodDescriptor getDeleteWorkflowTemplateMethodHelper() { + io.grpc.MethodDescriptor getDeleteWorkflowTemplateMethod; + if ((getDeleteWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getDeleteWorkflowTemplateMethod) == null) { + synchronized (WorkflowTemplateServiceGrpc.class) { + if ((getDeleteWorkflowTemplateMethod = WorkflowTemplateServiceGrpc.getDeleteWorkflowTemplateMethod) == null) { + WorkflowTemplateServiceGrpc.getDeleteWorkflowTemplateMethod = getDeleteWorkflowTemplateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService", "DeleteWorkflowTemplate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new WorkflowTemplateServiceMethodDescriptorSupplier("DeleteWorkflowTemplate")) + .build(); + } + } + } + return getDeleteWorkflowTemplateMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static WorkflowTemplateServiceStub newStub(io.grpc.Channel channel) { + return new WorkflowTemplateServiceStub(channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static WorkflowTemplateServiceBlockingStub newBlockingStub( + io.grpc.Channel channel) { + return new WorkflowTemplateServiceBlockingStub(channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static WorkflowTemplateServiceFutureStub newFutureStub( + io.grpc.Channel channel) { + return new WorkflowTemplateServiceFutureStub(channel); + } + + /** + *
+   * The API interface for managing Workflow Templates in the
+   * Cloud Dataproc API.
+   * 
+ */ + public static abstract class WorkflowTemplateServiceImplBase implements io.grpc.BindableService { + + /** + *
+     * Creates a new workflow template.
+     * 
+     */
+    public void createWorkflowTemplate(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> responseObserver) {
+      asyncUnimplementedUnaryCall(getCreateWorkflowTemplateMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Retrieves the latest workflow template.
+     * A previously instantiated template can be retrieved by specifying the
+     * optional version parameter.
+     * 
+     */
+    public void getWorkflowTemplate(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> responseObserver) {
+      asyncUnimplementedUnaryCall(getGetWorkflowTemplateMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Instantiates a template and begins execution.
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * 
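+     *
+     * On the client side, progress can be read from the returned Operation's
+     * metadata; a sketch (editor's illustration; assumes a blocking stub, and
+     * unpack throws InvalidProtocolBufferException):
+     *
+     *   com.google.longrunning.Operation op = stub.instantiateWorkflowTemplate(request);
+     *   WorkflowMetadata metadata = op.getMetadata().unpack(WorkflowMetadata.class);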
+     */
+    public void instantiateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.longrunning.Operation> responseObserver) {
+      asyncUnimplementedUnaryCall(getInstantiateWorkflowTemplateMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Instantiates a template and begins execution.
+     * This method is equivalent to executing the sequence
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * 
+     */
+    public void instantiateInlineWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.longrunning.Operation> responseObserver) {
+      asyncUnimplementedUnaryCall(getInstantiateInlineWorkflowTemplateMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Updates (replaces) a workflow template. The updated template
+     * must contain a version that matches the current server version.
+     * 
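+     *
+     * Typical read-modify-write sketch (editor's illustration; the fetched
+     * template already carries the server version, and putLabels assumes the
+     * template's labels map field):
+     *
+     *   WorkflowTemplate current = stub.getWorkflowTemplate(getRequest);
+     *   WorkflowTemplate updated = stub.updateWorkflowTemplate(
+     *       UpdateWorkflowTemplateRequest.newBuilder()
+     *           .setTemplate(current.toBuilder().putLabels("env", "test").build())
+     *           .build());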
+     */
+    public void updateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> responseObserver) {
+      asyncUnimplementedUnaryCall(getUpdateWorkflowTemplateMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Lists workflows that match the specified filter in the request.
+     * 
+     */
+    public void listWorkflowTemplates(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse> responseObserver) {
+      asyncUnimplementedUnaryCall(getListWorkflowTemplatesMethodHelper(), responseObserver);
+    }
+
+    /**
+     *
+     * Deletes a workflow template. It does not cancel in-progress workflows.
+     * 
+ */ + public void deleteWorkflowTemplate(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getDeleteWorkflowTemplateMethodHelper(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateWorkflowTemplateMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest, + com.google.cloud.dataproc.v1beta2.WorkflowTemplate>( + this, METHODID_CREATE_WORKFLOW_TEMPLATE))) + .addMethod( + getGetWorkflowTemplateMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest, + com.google.cloud.dataproc.v1beta2.WorkflowTemplate>( + this, METHODID_GET_WORKFLOW_TEMPLATE))) + .addMethod( + getInstantiateWorkflowTemplateMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest, + com.google.longrunning.Operation>( + this, METHODID_INSTANTIATE_WORKFLOW_TEMPLATE))) + .addMethod( + getInstantiateInlineWorkflowTemplateMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest, + com.google.longrunning.Operation>( + this, METHODID_INSTANTIATE_INLINE_WORKFLOW_TEMPLATE))) + .addMethod( + getUpdateWorkflowTemplateMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest, + com.google.cloud.dataproc.v1beta2.WorkflowTemplate>( + this, METHODID_UPDATE_WORKFLOW_TEMPLATE))) + .addMethod( + getListWorkflowTemplatesMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest, + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse>( + this, METHODID_LIST_WORKFLOW_TEMPLATES))) + .addMethod( + getDeleteWorkflowTemplateMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest, + com.google.protobuf.Empty>( + this, METHODID_DELETE_WORKFLOW_TEMPLATE))) + .build(); + } + } + + /** + *
+   * The API interface for managing Workflow Templates in the
+   * Cloud Dataproc API.
+   * 
+   */
+  public static final class WorkflowTemplateServiceStub extends io.grpc.stub.AbstractStub<WorkflowTemplateServiceStub> {
+    private WorkflowTemplateServiceStub(io.grpc.Channel channel) {
+      super(channel);
+    }
+
+    private WorkflowTemplateServiceStub(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected WorkflowTemplateServiceStub build(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      return new WorkflowTemplateServiceStub(channel, callOptions);
+    }
+
+    /**
+     *
+     * Creates a new workflow template.
+     * 
+     */
+    public void createWorkflowTemplate(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getCreateWorkflowTemplateMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Retrieves the latest workflow template.
+     * A previously instantiated template can be retrieved by specifying the
+     * optional version parameter.
+     * 
+     */
+    public void getWorkflowTemplate(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getGetWorkflowTemplateMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Instantiates a template and begins execution.
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * 
+     */
+    public void instantiateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.longrunning.Operation> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getInstantiateWorkflowTemplateMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Instantiates a template and begins execution.
+     * This method is equivalent to executing the sequence
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * 
+     */
+    public void instantiateInlineWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.longrunning.Operation> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getInstantiateInlineWorkflowTemplateMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Updates (replaces) a workflow template. The updated template
+     * must contain a version that matches the current server version.
+     * 
+     */
+    public void updateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getUpdateWorkflowTemplateMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Lists workflows that match the specified filter in the request.
+     * 
+     */
+    public void listWorkflowTemplates(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest request,
+        io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getListWorkflowTemplatesMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     *
+     * Deletes a workflow template. It does not cancel in-progress workflows.
+     * 
+     */
+    public void deleteWorkflowTemplate(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest request,
+        io.grpc.stub.StreamObserver<com.google.protobuf.Empty> responseObserver) {
+      asyncUnaryCall(
+          getChannel().newCall(getDeleteWorkflowTemplateMethodHelper(), getCallOptions()), request, responseObserver);
+    }
+  }
+
+  /**
+   *
+   * The API interface for managing Workflow Templates in the
+   * Cloud Dataproc API.
+   * </pre>
+   */
+  public static final class WorkflowTemplateServiceBlockingStub extends io.grpc.stub.AbstractStub<WorkflowTemplateServiceBlockingStub> {
+    private WorkflowTemplateServiceBlockingStub(io.grpc.Channel channel) {
+      super(channel);
+    }
+
+    private WorkflowTemplateServiceBlockingStub(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected WorkflowTemplateServiceBlockingStub build(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      return new WorkflowTemplateServiceBlockingStub(channel, callOptions);
+    }
+
+    /**
+     * <pre>
+     * Creates a new workflow template.
+     * </pre>
+     */
+    public com.google.cloud.dataproc.v1beta2.WorkflowTemplate createWorkflowTemplate(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest request) {
+      return blockingUnaryCall(
+          getChannel(), getCreateWorkflowTemplateMethodHelper(), getCallOptions(), request);
+    }
+
+    /**
+     * <pre>
+     * Retrieves the latest workflow template.
+     * A previously instantiated template can be retrieved by specifying the
+     * optional version parameter.
+     * </pre>
+     */
+    public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getWorkflowTemplate(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest request) {
+      return blockingUnaryCall(
+          getChannel(), getGetWorkflowTemplateMethodHelper(), getCallOptions(), request);
+    }
+
+    /**
+     * <pre>
+     * Instantiates a template and begins execution.
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * </pre>
+     */
+    public com.google.longrunning.Operation instantiateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest request) {
+      return blockingUnaryCall(
+          getChannel(), getInstantiateWorkflowTemplateMethodHelper(), getCallOptions(), request);
+    }
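+
+    // Illustrative sketch only: a blocking instantiate-and-poll flow. The
+    // "channel" and "request" values are assumed to be built elsewhere.
+    //
+    //   WorkflowTemplateServiceBlockingStub blockingStub =
+    //       WorkflowTemplateServiceGrpc.newBlockingStub(channel);
+    //   com.google.longrunning.Operation operation =
+    //       blockingStub.instantiateWorkflowTemplate(request);
+    //   // Track the workflow by polling operation.getName() through the
+    //   // google.longrunning.Operations service until getDone() is true.
+
+    /**
+     * <pre>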
+     * Instantiates a template and begins execution.
+     * This method is equivalent to executing the sequence
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * and [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * </pre>
+     */
+    public com.google.longrunning.Operation instantiateInlineWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest request) {
+      return blockingUnaryCall(
+          getChannel(), getInstantiateInlineWorkflowTemplateMethodHelper(), getCallOptions(), request);
+    }
+
+    /**
+     * <pre>
+     * Updates (replaces) a workflow template. The updated template
+     * must contain a version that matches the current server version.
+     * </pre>
+     */
+    public com.google.cloud.dataproc.v1beta2.WorkflowTemplate updateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest request) {
+      return blockingUnaryCall(
+          getChannel(), getUpdateWorkflowTemplateMethodHelper(), getCallOptions(), request);
+    }
+
+    /**
+     * <pre>
+     * Lists workflow templates that match the specified filter in the request.
+     * </pre>
+     */
+    public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse listWorkflowTemplates(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest request) {
+      return blockingUnaryCall(
+          getChannel(), getListWorkflowTemplatesMethodHelper(), getCallOptions(), request);
+    }
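+
+    // Illustrative sketch only: manual pagination over templates. The List*
+    // paging accessors (getTemplatesList, getNextPageToken, setPageToken) are
+    // assumptions based on the request/response types, not verified here.
+    //
+    //   com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.Builder req =
+    //       com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.newBuilder();
+    //   while (true) {
+    //     com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse page =
+    //         listWorkflowTemplates(req.build());
+    //     page.getTemplatesList().forEach(t -> System.out.println(t.getName()));
+    //     if (page.getNextPageToken().isEmpty()) break;
+    //     req.setPageToken(page.getNextPageToken());
+    //   }
+
+    /**
+     * <pre>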
+     * Deletes a workflow template. It does not cancel in-progress workflows.
+     * </pre>
+     */
+    public com.google.protobuf.Empty deleteWorkflowTemplate(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest request) {
+      return blockingUnaryCall(
+          getChannel(), getDeleteWorkflowTemplateMethodHelper(), getCallOptions(), request);
+    }
+  }
+
+  /**
+   * <pre>
+   * The API interface for managing Workflow Templates in the
+   * Cloud Dataproc API.
+   * </pre>
+   */
+  public static final class WorkflowTemplateServiceFutureStub extends io.grpc.stub.AbstractStub<WorkflowTemplateServiceFutureStub> {
+    private WorkflowTemplateServiceFutureStub(io.grpc.Channel channel) {
+      super(channel);
+    }
+
+    private WorkflowTemplateServiceFutureStub(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected WorkflowTemplateServiceFutureStub build(io.grpc.Channel channel,
+        io.grpc.CallOptions callOptions) {
+      return new WorkflowTemplateServiceFutureStub(channel, callOptions);
+    }
+
+    /**
+     * <pre>
+     * Creates a new workflow template.
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> createWorkflowTemplate(
+        com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getCreateWorkflowTemplateMethodHelper(), getCallOptions()), request);
+    }
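+
+    // Illustrative sketch only: attaching a Guava callback to the returned
+    // future. Futures and MoreExecutors are from com.google.common.util.concurrent;
+    // "futureStub" and "request" are assumed to be built elsewhere.
+    //
+    //   com.google.common.util.concurrent.Futures.addCallback(
+    //       futureStub.createWorkflowTemplate(request),
+    //       new com.google.common.util.concurrent.FutureCallback<com.google.cloud.dataproc.v1beta2.WorkflowTemplate>() {
+    //         @java.lang.Override
+    //         public void onSuccess(com.google.cloud.dataproc.v1beta2.WorkflowTemplate template) {
+    //           // e.g. inspect template.getName()
+    //         }
+    //         @java.lang.Override
+    //         public void onFailure(java.lang.Throwable t) { /* handle failure */ }
+    //       },
+    //       com.google.common.util.concurrent.MoreExecutors.directExecutor());
+
+    /**
+     * <pre>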
+     * Retrieves the latest workflow template.
+     * A previously instantiated template can be retrieved by specifying the
+     * optional version parameter.
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> getWorkflowTemplate(
+        com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getGetWorkflowTemplateMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Instantiates a template and begins execution.
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.longrunning.Operation> instantiateWorkflowTemplate(
+        com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getInstantiateWorkflowTemplateMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Instantiates a template and begins execution.
+     * This method is equivalent to executing the sequence
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * and [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+     * The returned Operation can be used to track execution of the
+     * workflow by polling
+     * [operations.get][google.longrunning.Operations.GetOperation].
+     * The Operation will complete when the entire workflow is finished.
+     * The running workflow can be aborted via
+     * [operations.cancel][google.longrunning.Operations.CancelOperation].
+     * This will cause any in-flight jobs to be cancelled and workflow-owned
+     * clusters to be deleted.
+     * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+     * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+     * On successful completion,
+     * [Operation.response][google.longrunning.Operation.response] will be
+     * [Empty][google.protobuf.Empty].
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.longrunning.Operation> instantiateInlineWorkflowTemplate(
+        com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getInstantiateInlineWorkflowTemplateMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Updates (replaces) a workflow template. The updated template
+     * must contain a version that matches the current server version.
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.WorkflowTemplate> updateWorkflowTemplate(
+        com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getUpdateWorkflowTemplateMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Lists workflow templates that match the specified filter in the request.
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse> listWorkflowTemplates(
+        com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getListWorkflowTemplatesMethodHelper(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Deletes a workflow template. It does not cancel in-progress workflows.
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty> deleteWorkflowTemplate(
+        com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest request) {
+      return futureUnaryCall(
+          getChannel().newCall(getDeleteWorkflowTemplateMethodHelper(), getCallOptions()), request);
+    }
+  }
+
+  private static final int METHODID_CREATE_WORKFLOW_TEMPLATE = 0;
+  private static final int METHODID_GET_WORKFLOW_TEMPLATE = 1;
+  private static final int METHODID_INSTANTIATE_WORKFLOW_TEMPLATE = 2;
+  private static final int METHODID_INSTANTIATE_INLINE_WORKFLOW_TEMPLATE = 3;
+  private static final int METHODID_UPDATE_WORKFLOW_TEMPLATE = 4;
+  private static final int METHODID_LIST_WORKFLOW_TEMPLATES = 5;
+  private static final int METHODID_DELETE_WORKFLOW_TEMPLATE = 6;
+
+  private static final class MethodHandlers<Req, Resp> implements
+      io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
+      io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
+      io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
+      io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
+    private final WorkflowTemplateServiceImplBase serviceImpl;
+    private final int methodId;
+
+    MethodHandlers(WorkflowTemplateServiceImplBase serviceImpl, int methodId) {
+      this.serviceImpl = serviceImpl;
+      this.methodId = methodId;
+    }
+
+    @java.lang.Override
+    @java.lang.SuppressWarnings("unchecked")
+    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
+      switch (methodId) {
+        case METHODID_CREATE_WORKFLOW_TEMPLATE:
+          serviceImpl.createWorkflowTemplate((com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate>) responseObserver);
+          break;
+        case METHODID_GET_WORKFLOW_TEMPLATE:
+          serviceImpl.getWorkflowTemplate((com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate>) responseObserver);
+          break;
+        case METHODID_INSTANTIATE_WORKFLOW_TEMPLATE:
+          serviceImpl.instantiateWorkflowTemplate((com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.longrunning.Operation>) responseObserver);
+          break;
+        case METHODID_INSTANTIATE_INLINE_WORKFLOW_TEMPLATE:
+          serviceImpl.instantiateInlineWorkflowTemplate((com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.longrunning.Operation>) responseObserver);
+          break;
+        case METHODID_UPDATE_WORKFLOW_TEMPLATE:
+          serviceImpl.updateWorkflowTemplate((com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.WorkflowTemplate>) responseObserver);
+          break;
+        case METHODID_LIST_WORKFLOW_TEMPLATES:
+          serviceImpl.listWorkflowTemplates((com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse>) responseObserver);
+          break;
+        case METHODID_DELETE_WORKFLOW_TEMPLATE:
+          serviceImpl.deleteWorkflowTemplate((com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) request,
+              (io.grpc.stub.StreamObserver<com.google.protobuf.Empty>) responseObserver);
+          break;
+        default:
+          throw new AssertionError();
+      }
+    }
+
+    @java.lang.Override
+    @java.lang.SuppressWarnings("unchecked")
+    public io.grpc.stub.StreamObserver<Req> invoke(
+        io.grpc.stub.StreamObserver<Resp> responseObserver) {
+      switch (methodId) {
+        default:
+          throw new AssertionError();
+      }
+    }
+  }
+
+  private static abstract class WorkflowTemplateServiceBaseDescriptorSupplier
+      implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier {
+    WorkflowTemplateServiceBaseDescriptorSupplier() {}
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
+      return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.getDescriptor();
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
+      return getFileDescriptor().findServiceByName("WorkflowTemplateService");
+    }
+  }
+
+  private static final class WorkflowTemplateServiceFileDescriptorSupplier
+      extends WorkflowTemplateServiceBaseDescriptorSupplier {
+    WorkflowTemplateServiceFileDescriptorSupplier() {}
+  }
+
+  private static final class WorkflowTemplateServiceMethodDescriptorSupplier
+      extends WorkflowTemplateServiceBaseDescriptorSupplier
+      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
+    private final String methodName;
+
+    WorkflowTemplateServiceMethodDescriptorSupplier(String methodName) {
+      this.methodName = methodName;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
+      return getServiceDescriptor().findMethodByName(methodName);
+    }
+  }
+
+  private static volatile io.grpc.ServiceDescriptor serviceDescriptor;
+
+  public static io.grpc.ServiceDescriptor getServiceDescriptor() {
+    io.grpc.ServiceDescriptor result = serviceDescriptor;
+    if (result == null) {
+      synchronized (WorkflowTemplateServiceGrpc.class) {
+        result = serviceDescriptor;
+        if (result == null) {
+          serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
+              .setSchemaDescriptor(new WorkflowTemplateServiceFileDescriptorSupplier())
+              .addMethod(getCreateWorkflowTemplateMethodHelper())
+              .addMethod(getGetWorkflowTemplateMethodHelper())
+              .addMethod(getInstantiateWorkflowTemplateMethodHelper())
+              .addMethod(getInstantiateInlineWorkflowTemplateMethodHelper())
+              .addMethod(getUpdateWorkflowTemplateMethodHelper())
+              .addMethod(getListWorkflowTemplatesMethodHelper())
+              .addMethod(getDeleteWorkflowTemplateMethodHelper())
+              .build();
+        }
+      }
+    }
+    return result;
+  }
+}
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java
new file mode 100644
index 000000000000..990efc99efde
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java
@@ -0,0 +1,723 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/clusters.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+/**
+ * <pre>
+ * Specifies the type and number of accelerator cards attached to the instances
+ * of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.AcceleratorConfig} + */ +public final class AcceleratorConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.AcceleratorConfig) + AcceleratorConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use AcceleratorConfig.newBuilder() to construct. + private AcceleratorConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private AcceleratorConfig() { + acceleratorTypeUri_ = ""; + acceleratorCount_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AcceleratorConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + acceleratorTypeUri_ = s; + break; + } + case 16: { + + acceleratorCount_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.AcceleratorConfig.class, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder.class); + } + + public static final int ACCELERATOR_TYPE_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object acceleratorTypeUri_; + /** + *
+   * Full URL, partial URI, or short name of the accelerator type resource to
+   * expose to this instance. See [Compute Engine AcceleratorTypes](
+   * /compute/docs/reference/beta/acceleratorTypes).
+   * Examples:
+   * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `nvidia-tesla-k80`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the accelerator type
+   * resource, for example, `nvidia-tesla-k80`.
+   * </pre>
+   *
+   * <code>string accelerator_type_uri = 1;</code>
+   */
+  public java.lang.String getAcceleratorTypeUri() {
+    java.lang.Object ref = acceleratorTypeUri_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      acceleratorTypeUri_ = s;
+      return s;
+    }
+  }
+  /**
+   * <pre>
+   * Full URL, partial URI, or short name of the accelerator type resource to
+   * expose to this instance. See [Compute Engine AcceleratorTypes](
+   * /compute/docs/reference/beta/acceleratorTypes).
+   * Examples:
+   * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `nvidia-tesla-k80`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the accelerator type
+   * resource, for example, `nvidia-tesla-k80`.
+   * </pre>
+   *
+   * <code>string accelerator_type_uri = 1;</code>
+   */
+  public com.google.protobuf.ByteString
+      getAcceleratorTypeUriBytes() {
+    java.lang.Object ref = acceleratorTypeUri_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      acceleratorTypeUri_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int ACCELERATOR_COUNT_FIELD_NUMBER = 2;
+  private int acceleratorCount_;
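+
+  // Illustrative sketch only: building this message. Both setters appear in
+  // the generated Builder below; the literal values are placeholders.
+  //
+  //   com.google.cloud.dataproc.v1beta2.AcceleratorConfig gpus =
+  //       com.google.cloud.dataproc.v1beta2.AcceleratorConfig.newBuilder()
+  //           .setAcceleratorTypeUri("nvidia-tesla-k80")
+  //           .setAcceleratorCount(2)
+  //           .build();
+
+  /**
+   * <pre>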
+   * The number of accelerator cards of this type exposed to this instance.
+   * 
+ * + * int32 accelerator_count = 2; + */ + public int getAcceleratorCount() { + return acceleratorCount_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getAcceleratorTypeUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, acceleratorTypeUri_); + } + if (acceleratorCount_ != 0) { + output.writeInt32(2, acceleratorCount_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getAcceleratorTypeUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, acceleratorTypeUri_); + } + if (acceleratorCount_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, acceleratorCount_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.AcceleratorConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.AcceleratorConfig other = (com.google.cloud.dataproc.v1beta2.AcceleratorConfig) obj; + + boolean result = true; + result = result && getAcceleratorTypeUri() + .equals(other.getAcceleratorTypeUri()); + result = result && (getAcceleratorCount() + == other.getAcceleratorCount()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ACCELERATOR_TYPE_URI_FIELD_NUMBER; + hash = (53 * hash) + getAcceleratorTypeUri().hashCode(); + hash = (37 * hash) + ACCELERATOR_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getAcceleratorCount(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.AcceleratorConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Specifies the type and number of accelerator cards attached to the instances
+   * of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.AcceleratorConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.AcceleratorConfig) + com.google.cloud.dataproc.v1beta2.AcceleratorConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.AcceleratorConfig.class, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.AcceleratorConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + acceleratorTypeUri_ = ""; + + acceleratorCount_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.AcceleratorConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig build() { + com.google.cloud.dataproc.v1beta2.AcceleratorConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.AcceleratorConfig result = new com.google.cloud.dataproc.v1beta2.AcceleratorConfig(this); + result.acceleratorTypeUri_ = acceleratorTypeUri_; + result.acceleratorCount_ = acceleratorCount_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.AcceleratorConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.AcceleratorConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.AcceleratorConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.AcceleratorConfig.getDefaultInstance()) return this; + if (!other.getAcceleratorTypeUri().isEmpty()) { + acceleratorTypeUri_ = other.acceleratorTypeUri_; + onChanged(); + } + if (other.getAcceleratorCount() != 0) { + setAcceleratorCount(other.getAcceleratorCount()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.AcceleratorConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.AcceleratorConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object acceleratorTypeUri_ = ""; + /** + *
+     * Full URL, partial URI, or short name of the accelerator type resource to
+     * expose to this instance. See [Compute Engine AcceleratorTypes](
+     * /compute/docs/reference/beta/acceleratorTypes).
+     * Examples:
+     * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `nvidia-tesla-k80`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the accelerator type
+     * resource, for example, `nvidia-tesla-k80`.
+     * 
+ * + * string accelerator_type_uri = 1; + */ + public java.lang.String getAcceleratorTypeUri() { + java.lang.Object ref = acceleratorTypeUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + acceleratorTypeUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Full URL, partial URI, or short name of the accelerator type resource to
+     * expose to this instance. See [Compute Engine AcceleratorTypes](
+     * /compute/docs/reference/beta/acceleratorTypes).
+     * Examples:
+     * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `nvidia-tesla-k80`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the accelerator type
+     * resource, for example, `nvidia-tesla-k80`.
+     * 
+ * + * string accelerator_type_uri = 1; + */ + public com.google.protobuf.ByteString + getAcceleratorTypeUriBytes() { + java.lang.Object ref = acceleratorTypeUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + acceleratorTypeUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Full URL, partial URI, or short name of the accelerator type resource to
+     * expose to this instance. See [Compute Engine AcceleratorTypes](
+     * /compute/docs/reference/beta/acceleratorTypes).
+     * Examples:
+     * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `nvidia-tesla-k80`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the accelerator type
+     * resource, for example, `nvidia-tesla-k80`.
+     * 
+ * + * string accelerator_type_uri = 1; + */ + public Builder setAcceleratorTypeUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + acceleratorTypeUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Full URL, partial URI, or short name of the accelerator type resource to
+     * expose to this instance. See [Compute Engine AcceleratorTypes](
+     * /compute/docs/reference/beta/acceleratorTypes).
+     * Examples:
+     * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `nvidia-tesla-k80`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the accelerator type
+     * resource, for example, `nvidia-tesla-k80`.
+     * 
+ * + * string accelerator_type_uri = 1; + */ + public Builder clearAcceleratorTypeUri() { + + acceleratorTypeUri_ = getDefaultInstance().getAcceleratorTypeUri(); + onChanged(); + return this; + } + /** + *
+     * Full URL, partial URI, or short name of the accelerator type resource to
+     * expose to this instance. See [Compute Engine AcceleratorTypes](
+     * /compute/docs/reference/beta/acceleratorTypes).
+     * Examples:
+     * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+     * * `nvidia-tesla-k80`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the accelerator type
+     * resource, for example, `nvidia-tesla-k80`.
+     * 
+ * + * string accelerator_type_uri = 1; + */ + public Builder setAcceleratorTypeUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + acceleratorTypeUri_ = value; + onChanged(); + return this; + } + + private int acceleratorCount_ ; + /** + *
+     * The number of accelerator cards of this type exposed to this instance.
+     * </pre>
+     *
+     * <code>int32 accelerator_count = 2;</code>
+     */
+    public int getAcceleratorCount() {
+      return acceleratorCount_;
+    }
+    /**
+     * <pre>
+     * The number of accelerator cards of this type exposed to this instance.
+     * </pre>
+     *
+     * <code>int32 accelerator_count = 2;</code>
+     */
+    public Builder setAcceleratorCount(int value) {
+
+      acceleratorCount_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * The number of accelerator cards of this type exposed to this instance.
+     * 
+ * + * int32 accelerator_count = 2; + */ + public Builder clearAcceleratorCount() { + + acceleratorCount_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.AcceleratorConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.AcceleratorConfig) + private static final com.google.cloud.dataproc.v1beta2.AcceleratorConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.AcceleratorConfig(); + } + + public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AcceleratorConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AcceleratorConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java new file mode 100644 index 000000000000..6762f52e0cdd --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java @@ -0,0 +1,56 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface AcceleratorConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.AcceleratorConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Full URL, partial URI, or short name of the accelerator type resource to
+   * expose to this instance. See [Compute Engine AcceleratorTypes](
+   * /compute/docs/reference/beta/acceleratorTypes).
+   * Examples:
+   * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `nvidia-tesla-k80`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the accelerator type
+   * resource, for example, `nvidia-tesla-k80`.
+   * </pre>
+   *
+   * <code>string accelerator_type_uri = 1;</code>
+   */
+  java.lang.String getAcceleratorTypeUri();
+  /**
+   * <pre>
+   * Full URL, partial URI, or short name of the accelerator type resource to
+   * expose to this instance. See [Compute Engine AcceleratorTypes](
+   * /compute/docs/reference/beta/acceleratorTypes).
+   * Examples:
+   * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+   * * `nvidia-tesla-k80`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the accelerator type
+   * resource, for example, `nvidia-tesla-k80`.
+   * </pre>
+   *
+   * <code>string accelerator_type_uri = 1;</code>
+   */
+  com.google.protobuf.ByteString
+      getAcceleratorTypeUriBytes();
+
+  /**
+   * <pre>
+   * The number of accelerator cards of this type exposed to this instance.
+   * 
+ * + * int32 accelerator_count = 2; + */ + int getAcceleratorCount(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java new file mode 100644 index 000000000000..cad2f6e50eee --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java @@ -0,0 +1,894 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to cancel a job.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.CancelJobRequest} + */ +public final class CancelJobRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.CancelJobRequest) + CancelJobRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use CancelJobRequest.newBuilder() to construct. + private CancelJobRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CancelJobRequest() { + projectId_ = ""; + region_ = ""; + jobId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CancelJobRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + jobId_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.CancelJobRequest.class, com.google.cloud.dataproc.v1beta2.CancelJobRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * </pre>
+   *
+   * <code>string project_id = 1;</code>
+   */
+  public java.lang.String getProjectId() {
+    java.lang.Object ref = projectId_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      projectId_ = s;
+      return s;
+    }
+  }
+  /**
+   * <pre>
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * </pre>
+   *
+   * <code>string project_id = 1;</code>
+   */
+  public com.google.protobuf.ByteString
+      getProjectIdBytes() {
+    java.lang.Object ref = projectId_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      projectId_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int REGION_FIELD_NUMBER = 3;
+  private volatile java.lang.Object region_;
+  /**
+   * <pre>
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * </pre>
+   *
+   * <code>string region = 3;</code>
+   */
+  public java.lang.String getRegion() {
+    java.lang.Object ref = region_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      region_ = s;
+      return s;
+    }
+  }
+  /**
+   * <pre>
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * </pre>
+   *
+   * <code>string region = 3;</code>
+   */
+  public com.google.protobuf.ByteString
+      getRegionBytes() {
+    java.lang.Object ref = region_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      region_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int JOB_ID_FIELD_NUMBER = 2;
+  private volatile java.lang.Object jobId_;
+  /**
+   * <pre>
+   * Required. The job ID.
+   * </pre>
+   *
+   * <code>string job_id = 2;</code>
+   */
+  public java.lang.String getJobId() {
+    java.lang.Object ref = jobId_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      jobId_ = s;
+      return s;
+    }
+  }
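+
+  // Illustrative sketch only: assembling a cancel request from the three
+  // required fields above. The standard generated setters are assumed; the
+  // literal values are placeholders.
+  //
+  //   com.google.cloud.dataproc.v1beta2.CancelJobRequest cancel =
+  //       com.google.cloud.dataproc.v1beta2.CancelJobRequest.newBuilder()
+  //           .setProjectId("my-project")
+  //           .setRegion("global")
+  //           .setJobId("job-1234")
+  //           .build();
+
+  /**
+   * <pre>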
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, jobId_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, jobId_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.CancelJobRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.CancelJobRequest other = (com.google.cloud.dataproc.v1beta2.CancelJobRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getJobId() + .equals(other.getJobId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.CancelJobRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to cancel a job.
+   * 
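+   * A minimal usage sketch, for illustration only (the project, region, and
+   * job ID values below are assumed placeholders, not part of the API):
+   *
+   *   CancelJobRequest request = CancelJobRequest.newBuilder()
+   *       .setProjectId("my-project")
+   *       .setRegion("global")
+   *       .setJobId("job-1234")
+   *       .build();
+   *   // Round-trip through the wire format:
+   *   CancelJobRequest parsed = CancelJobRequest.parseFrom(request.toByteArray());
+   *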
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.CancelJobRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.CancelJobRequest) + com.google.cloud.dataproc.v1beta2.CancelJobRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.CancelJobRequest.class, com.google.cloud.dataproc.v1beta2.CancelJobRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.CancelJobRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + jobId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CancelJobRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.CancelJobRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CancelJobRequest build() { + com.google.cloud.dataproc.v1beta2.CancelJobRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CancelJobRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.CancelJobRequest result = new com.google.cloud.dataproc.v1beta2.CancelJobRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.jobId_ = jobId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + 
java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.CancelJobRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.CancelJobRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.CancelJobRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.CancelJobRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getJobId().isEmpty()) { + jobId_ = other.jobId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.CancelJobRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.CancelJobRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object jobId_ = ""; + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + jobId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder clearJobId() { + + jobId_ = getDefaultInstance().getJobId(); + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + jobId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.CancelJobRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.CancelJobRequest) + private static final com.google.cloud.dataproc.v1beta2.CancelJobRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.CancelJobRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.CancelJobRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CancelJobRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CancelJobRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CancelJobRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequestOrBuilder.java new file mode 100644 index 000000000000..b0a258a3685f --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequestOrBuilder.java @@ -0,0 +1,65 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface CancelJobRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.CancelJobRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + java.lang.String getJobId(); + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + com.google.protobuf.ByteString + getJobIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java new file mode 100644 index 000000000000..35f46528be92 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java @@ -0,0 +1,2438 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Describes the identifying information, config, and status of
+ * a cluster of Compute Engine instances.
+ * 
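+ * A minimal construction sketch, for illustration only (field values are
+ * assumed placeholders; a real config would be populated rather than the
+ * default instance):
+ *
+ *   Cluster cluster = Cluster.newBuilder()
+ *       .setProjectId("my-project")
+ *       .setClusterName("my-cluster")
+ *       .setConfig(ClusterConfig.getDefaultInstance())
+ *       .build();
+ *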
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.Cluster} + */ +public final class Cluster extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.Cluster) + ClusterOrBuilder { +private static final long serialVersionUID = 0L; + // Use Cluster.newBuilder() to construct. + private Cluster(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Cluster() { + projectId_ = ""; + clusterName_ = ""; + statusHistory_ = java.util.Collections.emptyList(); + clusterUuid_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Cluster( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 26: { + com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder subBuilder = null; + if (config_ != null) { + subBuilder = config_.toBuilder(); + } + config_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(config_); + config_ = subBuilder.buildPartial(); + } + + break; + } + case 34: { + com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder subBuilder = null; + if (status_ != null) { + subBuilder = status_.toBuilder(); + } + status_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterStatus.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(status_); + status_ = subBuilder.buildPartial(); + } + + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterUuid_ = s; + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + statusHistory_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000020; + } + statusHistory_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterStatus.parser(), extensionRegistry)); + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000008; + } + com.google.protobuf.MapEntry + labels__ = input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + labels_.getMutableMap().put( + labels__.getKey(), labels__.getValue()); + break; + } + case 74: { + com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder subBuilder = null; + if (metrics_ != null) { + subBuilder = metrics_.toBuilder(); + } + metrics_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterMetrics.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(metrics_); + metrics_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, 
extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + statusHistory_ = java.util.Collections.unmodifiableList(statusHistory_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_Cluster_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 8: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_Cluster_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.Cluster.class, com.google.cloud.dataproc.v1beta2.Cluster.Builder.class); + } + + private int bitField0_; + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The Google Cloud Platform project ID that the cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The Google Cloud Platform project ID that the cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object clusterName_; + /** + *
+   * Required. The cluster name. Cluster names within a project must be
+   * unique. Names of deleted clusters can be reused.
+   * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Required. The cluster name. Cluster names within a project must be
+   * unique. Names of deleted clusters can be reused.
+   * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONFIG_FIELD_NUMBER = 3; + private com.google.cloud.dataproc.v1beta2.ClusterConfig config_; + /** + *
+   * Required. The cluster config. Note that Cloud Dataproc may set
+   * default values, and values may change when clusters are updated.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public boolean hasConfig() { + return config_ != null; + } + /** + *
+   * Required. The cluster config. Note that Cloud Dataproc may set
+   * default values, and values may change when clusters are updated.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig() { + return config_ == null ? com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_; + } + /** + *
+   * Required. The cluster config. Note that Cloud Dataproc may set
+   * default values, and values may change when clusters are updated.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder() { + return getConfig(); + } + + public static final int LABELS_FIELD_NUMBER = 8; + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_Cluster_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
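+   * A hypothetical read-side sketch (the variable {@code cluster} and the
+   * key "env" are assumed examples):
+   *
+   *   if (cluster.containsLabels("env")) {
+   *     String env = cluster.getLabelsOrDefault("env", "unknown");
+   *   }
+   *   java.util.Map<java.lang.String, java.lang.String> all = cluster.getLabelsMap();
+   *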
+ * + * map<string, string> labels = 8; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
+ * + * map<string, string> labels = 8; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
+ * + * map<string, string> labels = 8; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
+ * + * map<string, string> labels = 8; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int STATUS_FIELD_NUMBER = 4; + private com.google.cloud.dataproc.v1beta2.ClusterStatus status_; + /** + *
+   * Output only. Cluster status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4; + */ + public boolean hasStatus() { + return status_ != null; + } + /** + *
+   * Output only. Cluster status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4; + */ + public com.google.cloud.dataproc.v1beta2.ClusterStatus getStatus() { + return status_ == null ? com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance() : status_; + } + /** + *
+   * Output only. Cluster status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4; + */ + public com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusOrBuilder() { + return getStatus(); + } + + public static final int STATUS_HISTORY_FIELD_NUMBER = 7; + private java.util.List statusHistory_; + /** + *
+   * Output only. The previous cluster status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + public java.util.List getStatusHistoryList() { + return statusHistory_; + } + /** + *
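+   * A hypothetical read-side sketch (the variable {@code cluster} is an
+   * assumed example):
+   *
+   *   for (ClusterStatus past : cluster.getStatusHistoryList()) {
+   *     // inspect each prior status in order
+   *   }
+   *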
+   * Output only. The previous cluster status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + public java.util.List + getStatusHistoryOrBuilderList() { + return statusHistory_; + } + /** + *
+   * Output only. The previous cluster status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + public int getStatusHistoryCount() { + return statusHistory_.size(); + } + /** + *
+   * Output only. The previous cluster status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + public com.google.cloud.dataproc.v1beta2.ClusterStatus getStatusHistory(int index) { + return statusHistory_.get(index); + } + /** + *
+   * Output only. The previous cluster status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + public com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusHistoryOrBuilder( + int index) { + return statusHistory_.get(index); + } + + public static final int CLUSTER_UUID_FIELD_NUMBER = 6; + private volatile java.lang.Object clusterUuid_; + /** + *
+   * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+   * generates this value when it creates the cluster.
+   * 
+ * + * string cluster_uuid = 6; + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } + } + /** + *
+   * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+   * generates this value when it creates the cluster.
+   * 
+ * + * string cluster_uuid = 6; + */ + public com.google.protobuf.ByteString + getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int METRICS_FIELD_NUMBER = 9; + private com.google.cloud.dataproc.v1beta2.ClusterMetrics metrics_; + /** + *
+   * Contains cluster daemon metrics such as HDFS and YARN stats.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
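+   * A hypothetical presence-checked read (the variable {@code cluster} is an
+   * assumed example):
+   *
+   *   if (cluster.hasMetrics()) {
+   *     ClusterMetrics metrics = cluster.getMetrics();
+   *   }
+   *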
+ * + * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; + */ + public boolean hasMetrics() { + return metrics_ != null; + } + /** + *
+   * Contains cluster daemon metrics such as HDFS and YARN stats.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; + */ + public com.google.cloud.dataproc.v1beta2.ClusterMetrics getMetrics() { + return metrics_ == null ? com.google.cloud.dataproc.v1beta2.ClusterMetrics.getDefaultInstance() : metrics_; + } + /** + *
+   * Contains cluster daemon metrics such as HDFS and YARN stats.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; + */ + public com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder getMetricsOrBuilder() { + return getMetrics(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterName_); + } + if (config_ != null) { + output.writeMessage(3, getConfig()); + } + if (status_ != null) { + output.writeMessage(4, getStatus()); + } + if (!getClusterUuidBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, clusterUuid_); + } + for (int i = 0; i < statusHistory_.size(); i++) { + output.writeMessage(7, statusHistory_.get(i)); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetLabels(), + LabelsDefaultEntryHolder.defaultEntry, + 8); + if (metrics_ != null) { + output.writeMessage(9, getMetrics()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterName_); + } + if (config_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getConfig()); + } + if (status_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getStatus()); + } + if (!getClusterUuidBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, clusterUuid_); + } + for (int i = 0; i < statusHistory_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, statusHistory_.get(i)); + } + for (java.util.Map.Entry entry + : internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry + labels__ = LabelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, labels__); + } + if (metrics_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, getMetrics()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.Cluster)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.Cluster other = (com.google.cloud.dataproc.v1beta2.Cluster) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && (hasConfig() == other.hasConfig()); + if (hasConfig()) { + result = result && getConfig() + .equals(other.getConfig()); + } + result = 
result && internalGetLabels().equals( + other.internalGetLabels()); + result = result && (hasStatus() == other.hasStatus()); + if (hasStatus()) { + result = result && getStatus() + .equals(other.getStatus()); + } + result = result && getStatusHistoryList() + .equals(other.getStatusHistoryList()); + result = result && getClusterUuid() + .equals(other.getClusterUuid()); + result = result && (hasMetrics() == other.hasMetrics()); + if (hasMetrics()) { + result = result && getMetrics() + .equals(other.getMetrics()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + if (hasConfig()) { + hash = (37 * hash) + CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getConfig().hashCode(); + } + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (getStatusHistoryCount() > 0) { + hash = (37 * hash) + STATUS_HISTORY_FIELD_NUMBER; + hash = (53 * hash) + getStatusHistoryList().hashCode(); + } + hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; + hash = (53 * hash) + getClusterUuid().hashCode(); + if (hasMetrics()) { + hash = (37 * hash) + METRICS_FIELD_NUMBER; + hash = (53 * hash) + getMetrics().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.Cluster parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.Cluster prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Describes the identifying information, config, and status of
+   * a cluster of Compute Engine instances.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.Cluster} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.Cluster) + com.google.cloud.dataproc.v1beta2.ClusterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_Cluster_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 8: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 8: + return internalGetMutableLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_Cluster_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.Cluster.class, com.google.cloud.dataproc.v1beta2.Cluster.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.Cluster.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getStatusHistoryFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + clusterName_ = ""; + + if (configBuilder_ == null) { + config_ = null; + } else { + config_ = null; + configBuilder_ = null; + } + internalGetMutableLabels().clear(); + if (statusBuilder_ == null) { + status_ = null; + } else { + status_ = null; + statusBuilder_ = null; + } + if (statusHistoryBuilder_ == null) { + statusHistory_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + } else { + statusHistoryBuilder_.clear(); + } + clusterUuid_ = ""; + + if (metricsBuilder_ == null) { + metrics_ = null; + } else { + metrics_ = null; + metricsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_Cluster_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.Cluster getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.Cluster build() { + com.google.cloud.dataproc.v1beta2.Cluster result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.Cluster buildPartial() { + com.google.cloud.dataproc.v1beta2.Cluster result = new com.google.cloud.dataproc.v1beta2.Cluster(this); + int from_bitField0_ = bitField0_; + int 
to_bitField0_ = 0; + result.projectId_ = projectId_; + result.clusterName_ = clusterName_; + if (configBuilder_ == null) { + result.config_ = config_; + } else { + result.config_ = configBuilder_.build(); + } + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + if (statusBuilder_ == null) { + result.status_ = status_; + } else { + result.status_ = statusBuilder_.build(); + } + if (statusHistoryBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { + statusHistory_ = java.util.Collections.unmodifiableList(statusHistory_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.statusHistory_ = statusHistory_; + } else { + result.statusHistory_ = statusHistoryBuilder_.build(); + } + result.clusterUuid_ = clusterUuid_; + if (metricsBuilder_ == null) { + result.metrics_ = metrics_; + } else { + result.metrics_ = metricsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.Cluster) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.Cluster)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.Cluster other) { + if (other == com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (other.hasConfig()) { + mergeConfig(other.getConfig()); + } + internalGetMutableLabels().mergeFrom( + other.internalGetLabels()); + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + if (statusHistoryBuilder_ == null) { + if (!other.statusHistory_.isEmpty()) { + if (statusHistory_.isEmpty()) { + statusHistory_ = other.statusHistory_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureStatusHistoryIsMutable(); + statusHistory_.addAll(other.statusHistory_); + } + onChanged(); + } + } else { + if (!other.statusHistory_.isEmpty()) { + if (statusHistoryBuilder_.isEmpty()) { + statusHistoryBuilder_.dispose(); + statusHistoryBuilder_ = null; + statusHistory_ = other.statusHistory_; + bitField0_ = (bitField0_ & ~0x00000020); + statusHistoryBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getStatusHistoryFieldBuilder() : null; + } else { + statusHistoryBuilder_.addAllMessages(other.statusHistory_); + } + } + } + if (!other.getClusterUuid().isEmpty()) { + clusterUuid_ = other.clusterUuid_; + onChanged(); + } + if (other.hasMetrics()) { + mergeMetrics(other.getMetrics()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.Cluster parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.Cluster) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The Google Cloud Platform project ID that the cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Google Cloud Platform project ID that the cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Google Cloud Platform project ID that the cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Google Cloud Platform project ID that the cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The Google Cloud Platform project ID that the cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Required. The cluster name. Cluster names within a project must be
+     * unique. Names of deleted clusters can be reused.
+     * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The cluster name. Cluster names within a project must be
+     * unique. Names of deleted clusters can be reused.
+     * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The cluster name. Cluster names within a project must be
+     * unique. Names of deleted clusters can be reused.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name. Cluster names within a project must be
+     * unique. Names of deleted clusters can be reused.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name. Cluster names within a project must be
+     * unique. Names of deleted clusters can be reused.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.ClusterConfig config_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder> configBuilder_; + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public boolean hasConfig() { + return configBuilder_ != null || config_ != null; + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig() { + if (configBuilder_ == null) { + return config_ == null ? com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_; + } else { + return configBuilder_.getMessage(); + } + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder setConfig(com.google.cloud.dataproc.v1beta2.ClusterConfig value) { + if (configBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + config_ = value; + onChanged(); + } else { + configBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder setConfig( + com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder builderForValue) { + if (configBuilder_ == null) { + config_ = builderForValue.build(); + onChanged(); + } else { + configBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder mergeConfig(com.google.cloud.dataproc.v1beta2.ClusterConfig value) { + if (configBuilder_ == null) { + if (config_ != null) { + config_ = + com.google.cloud.dataproc.v1beta2.ClusterConfig.newBuilder(config_).mergeFrom(value).buildPartial(); + } else { + config_ = value; + } + onChanged(); + } else { + configBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder clearConfig() { + if (configBuilder_ == null) { + config_ = null; + onChanged(); + } else { + config_ = null; + configBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder getConfigBuilder() { + + onChanged(); + return getConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder() { + if (configBuilder_ != null) { + return configBuilder_.getMessageOrBuilder(); + } else { + return config_ == null ? + com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_; + } + } + /** + *
+     * Required. The cluster config. Note that Cloud Dataproc may set
+     * default values, and values may change when clusters are updated.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3;
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder>
+        getConfigFieldBuilder() {
+      if (configBuilder_ == null) {
+        configBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder>(
+                getConfig(),
+                getParentForChildren(),
+                isClean());
+        config_ = null;
+      }
+      return configBuilder_;
+    }
+
+    private com.google.protobuf.MapField<
+        java.lang.String, java.lang.String> labels_;
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+        internalGetLabels() {
+      if (labels_ == null) {
+        return com.google.protobuf.MapField.emptyMapField(
+            LabelsDefaultEntryHolder.defaultEntry);
+      }
+      return labels_;
+    }
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+        internalGetMutableLabels() {
+      onChanged();;
+      if (labels_ == null) {
+        labels_ = com.google.protobuf.MapField.newMapField(
+            LabelsDefaultEntryHolder.defaultEntry);
+      }
+      if (!labels_.isMutable()) {
+        labels_ = labels_.copy();
+      }
+      return labels_;
+    }
+
+    public int getLabelsCount() {
+      return internalGetLabels().getMap().size();
+    }
+    /**
+     *
+     * Optional. The labels to associate with this cluster.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a cluster.
+     * 
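+     * For illustration only (an editor's sketch, not generated output),
+     * labels that satisfy these constraints can be attached through the
+     * builder; the key/value strings below are placeholders:
+     *
+     *     Cluster.Builder builder = Cluster.newBuilder();
+     *     builder.putLabels("env", "dev");
+     *     builder.putAllLabels(java.util.Collections.singletonMap("team", "data"));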
+     *
+     * map<string, string> labels = 8;
+     */
+
+    public boolean containsLabels(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      return internalGetLabels().getMap().containsKey(key);
+    }
+    /**
+     * Use {@link #getLabelsMap()} instead.
+     */
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String> getLabels() {
+      return getLabelsMap();
+    }
+    /**
+     *
+     * Optional. The labels to associate with this cluster.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a cluster.
+     * 
+     *
+     * map<string, string> labels = 8;
+     */
+
+    public java.util.Map<java.lang.String, java.lang.String> getLabelsMap() {
+      return internalGetLabels().getMap();
+    }
+    /**
+     *
+     * Optional. The labels to associate with this cluster.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a cluster.
+     * 
+     *
+     * map<string, string> labels = 8;
+     */
+
+    public java.lang.String getLabelsOrDefault(
+        java.lang.String key,
+        java.lang.String defaultValue) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      java.util.Map<java.lang.String, java.lang.String> map =
+          internalGetLabels().getMap();
+      return map.containsKey(key) ? map.get(key) : defaultValue;
+    }
+    /**
+     *
+     * Optional. The labels to associate with this cluster.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a cluster.
+     * 
+     *
+     * map<string, string> labels = 8;
+     */
+
+    public java.lang.String getLabelsOrThrow(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      java.util.Map<java.lang.String, java.lang.String> map =
+          internalGetLabels().getMap();
+      if (!map.containsKey(key)) {
+        throw new java.lang.IllegalArgumentException();
+      }
+      return map.get(key);
+    }
+
+    public Builder clearLabels() {
+      internalGetMutableLabels().getMutableMap()
+          .clear();
+      return this;
+    }
+    /**
+     *
+     * Optional. The labels to associate with this cluster.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a cluster.
+     * 
+     *
+     * map<string, string> labels = 8;
+     */
+
+    public Builder removeLabels(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      internalGetMutableLabels().getMutableMap()
+          .remove(key);
+      return this;
+    }
+    /**
+     * Use alternate mutation accessors instead.
+     */
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String>
+        getMutableLabels() {
+      return internalGetMutableLabels().getMutableMap();
+    }
+    /**
+     *
+     * Optional. The labels to associate with this cluster.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a cluster.
+     * 
+     *
+     * map<string, string> labels = 8;
+     */
+    public Builder putLabels(
+        java.lang.String key,
+        java.lang.String value) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      if (value == null) { throw new java.lang.NullPointerException(); }
+      internalGetMutableLabels().getMutableMap()
+          .put(key, value);
+      return this;
+    }
+    /**
+     *
+     * Optional. The labels to associate with this cluster.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a cluster.
+     * 
+     *
+     * map<string, string> labels = 8;
+     */
+
+    public Builder putAllLabels(
+        java.util.Map<java.lang.String, java.lang.String> values) {
+      internalGetMutableLabels().getMutableMap()
+          .putAll(values);
+      return this;
+    }
+
+    private com.google.cloud.dataproc.v1beta2.ClusterStatus status_ = null;
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.ClusterStatus, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder> statusBuilder_;
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public boolean hasStatus() {
+      return statusBuilder_ != null || status_ != null;
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus getStatus() {
+      if (statusBuilder_ == null) {
+        return status_ == null ? com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance() : status_;
+      } else {
+        return statusBuilder_.getMessage();
+      }
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public Builder setStatus(com.google.cloud.dataproc.v1beta2.ClusterStatus value) {
+      if (statusBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        status_ = value;
+        onChanged();
+      } else {
+        statusBuilder_.setMessage(value);
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public Builder setStatus(
+        com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder builderForValue) {
+      if (statusBuilder_ == null) {
+        status_ = builderForValue.build();
+        onChanged();
+      } else {
+        statusBuilder_.setMessage(builderForValue.build());
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public Builder mergeStatus(com.google.cloud.dataproc.v1beta2.ClusterStatus value) {
+      if (statusBuilder_ == null) {
+        if (status_ != null) {
+          status_ =
+            com.google.cloud.dataproc.v1beta2.ClusterStatus.newBuilder(status_).mergeFrom(value).buildPartial();
+        } else {
+          status_ = value;
+        }
+        onChanged();
+      } else {
+        statusBuilder_.mergeFrom(value);
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public Builder clearStatus() {
+      if (statusBuilder_ == null) {
+        status_ = null;
+        onChanged();
+      } else {
+        status_ = null;
+        statusBuilder_ = null;
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder getStatusBuilder() {
+
+      onChanged();
+      return getStatusFieldBuilder().getBuilder();
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusOrBuilder() {
+      if (statusBuilder_ != null) {
+        return statusBuilder_.getMessageOrBuilder();
+      } else {
+        return status_ == null ?
+            com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance() : status_;
+      }
+    }
+    /**
+     *
+     * Output only. Cluster status.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4;
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.ClusterStatus, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder>
+        getStatusFieldBuilder() {
+      if (statusBuilder_ == null) {
+        statusBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.ClusterStatus, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder>(
+                getStatus(),
+                getParentForChildren(),
+                isClean());
+        status_ = null;
+      }
+      return statusBuilder_;
+    }
+
+    private java.util.List<com.google.cloud.dataproc.v1beta2.ClusterStatus> statusHistory_ =
+      java.util.Collections.emptyList();
+    private void ensureStatusHistoryIsMutable() {
+      if (!((bitField0_ & 0x00000020) == 0x00000020)) {
+        statusHistory_ = new java.util.ArrayList<com.google.cloud.dataproc.v1beta2.ClusterStatus>(statusHistory_);
+        bitField0_ |= 0x00000020;
+      }
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.ClusterStatus, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder> statusHistoryBuilder_;
+
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public java.util.List<com.google.cloud.dataproc.v1beta2.ClusterStatus> getStatusHistoryList() {
+      if (statusHistoryBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(statusHistory_);
+      } else {
+        return statusHistoryBuilder_.getMessageList();
+      }
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public int getStatusHistoryCount() {
+      if (statusHistoryBuilder_ == null) {
+        return statusHistory_.size();
+      } else {
+        return statusHistoryBuilder_.getCount();
+      }
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus getStatusHistory(int index) {
+      if (statusHistoryBuilder_ == null) {
+        return statusHistory_.get(index);
+      } else {
+        return statusHistoryBuilder_.getMessage(index);
+      }
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder setStatusHistory(
+        int index, com.google.cloud.dataproc.v1beta2.ClusterStatus value) {
+      if (statusHistoryBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureStatusHistoryIsMutable();
+        statusHistory_.set(index, value);
+        onChanged();
+      } else {
+        statusHistoryBuilder_.setMessage(index, value);
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder setStatusHistory(
+        int index, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder builderForValue) {
+      if (statusHistoryBuilder_ == null) {
+        ensureStatusHistoryIsMutable();
+        statusHistory_.set(index, builderForValue.build());
+        onChanged();
+      } else {
+        statusHistoryBuilder_.setMessage(index, builderForValue.build());
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder addStatusHistory(com.google.cloud.dataproc.v1beta2.ClusterStatus value) {
+      if (statusHistoryBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureStatusHistoryIsMutable();
+        statusHistory_.add(value);
+        onChanged();
+      } else {
+        statusHistoryBuilder_.addMessage(value);
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder addStatusHistory(
+        int index, com.google.cloud.dataproc.v1beta2.ClusterStatus value) {
+      if (statusHistoryBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureStatusHistoryIsMutable();
+        statusHistory_.add(index, value);
+        onChanged();
+      } else {
+        statusHistoryBuilder_.addMessage(index, value);
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder addStatusHistory(
+        com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder builderForValue) {
+      if (statusHistoryBuilder_ == null) {
+        ensureStatusHistoryIsMutable();
+        statusHistory_.add(builderForValue.build());
+        onChanged();
+      } else {
+        statusHistoryBuilder_.addMessage(builderForValue.build());
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder addStatusHistory(
+        int index, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder builderForValue) {
+      if (statusHistoryBuilder_ == null) {
+        ensureStatusHistoryIsMutable();
+        statusHistory_.add(index, builderForValue.build());
+        onChanged();
+      } else {
+        statusHistoryBuilder_.addMessage(index, builderForValue.build());
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder addAllStatusHistory(
+        java.lang.Iterable<? extends com.google.cloud.dataproc.v1beta2.ClusterStatus> values) {
+      if (statusHistoryBuilder_ == null) {
+        ensureStatusHistoryIsMutable();
+        com.google.protobuf.AbstractMessageLite.Builder.addAll(
+            values, statusHistory_);
+        onChanged();
+      } else {
+        statusHistoryBuilder_.addAllMessages(values);
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder clearStatusHistory() {
+      if (statusHistoryBuilder_ == null) {
+        statusHistory_ = java.util.Collections.emptyList();
+        bitField0_ = (bitField0_ & ~0x00000020);
+        onChanged();
+      } else {
+        statusHistoryBuilder_.clear();
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public Builder removeStatusHistory(int index) {
+      if (statusHistoryBuilder_ == null) {
+        ensureStatusHistoryIsMutable();
+        statusHistory_.remove(index);
+        onChanged();
+      } else {
+        statusHistoryBuilder_.remove(index);
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder getStatusHistoryBuilder(
+        int index) {
+      return getStatusHistoryFieldBuilder().getBuilder(index);
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusHistoryOrBuilder(
+        int index) {
+      if (statusHistoryBuilder_ == null) {
+        return statusHistory_.get(index);  } else {
+        return statusHistoryBuilder_.getMessageOrBuilder(index);
+      }
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public java.util.List<? extends com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder>
+        getStatusHistoryOrBuilderList() {
+      if (statusHistoryBuilder_ != null) {
+        return statusHistoryBuilder_.getMessageOrBuilderList();
+      } else {
+        return java.util.Collections.unmodifiableList(statusHistory_);
+      }
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder addStatusHistoryBuilder() {
+      return getStatusHistoryFieldBuilder().addBuilder(
+          com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance());
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder addStatusHistoryBuilder(
+        int index) {
+      return getStatusHistoryFieldBuilder().addBuilder(
+          index, com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance());
+    }
+    /**
+     *
+     * Output only. The previous cluster status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;
+     */
+    public java.util.List<com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder>
+        getStatusHistoryBuilderList() {
+      return getStatusHistoryFieldBuilder().getBuilderList();
+    }
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.ClusterStatus, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder>
+        getStatusHistoryFieldBuilder() {
+      if (statusHistoryBuilder_ == null) {
+        statusHistoryBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.ClusterStatus, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder>(
+                statusHistory_,
+                ((bitField0_ & 0x00000020) == 0x00000020),
+                getParentForChildren(),
+                isClean());
+        statusHistory_ = null;
+      }
+      return statusHistoryBuilder_;
+    }
+
+    private java.lang.Object clusterUuid_ = "";
+    /**
+     *
+     * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+     * generates this value when it creates the cluster.
+     * 
+     *
+     * string cluster_uuid = 6;
+     */
+    public java.lang.String getClusterUuid() {
+      java.lang.Object ref = clusterUuid_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        clusterUuid_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     *
+     * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+     * generates this value when it creates the cluster.
+     * 
+     *
+     * string cluster_uuid = 6;
+     */
+    public com.google.protobuf.ByteString
+        getClusterUuidBytes() {
+      java.lang.Object ref = clusterUuid_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        clusterUuid_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     *
+     * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+     * generates this value when it creates the cluster.
+     * 
+     *
+     * string cluster_uuid = 6;
+     */
+    public Builder setClusterUuid(
+        java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      clusterUuid_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+     * generates this value when it creates the cluster.
+     * 
+     *
+     * string cluster_uuid = 6;
+     */
+    public Builder clearClusterUuid() {
+
+      clusterUuid_ = getDefaultInstance().getClusterUuid();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+     * generates this value when it creates the cluster.
+     * 
+     *
+     * string cluster_uuid = 6;
+     */
+    public Builder setClusterUuidBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+
+      clusterUuid_ = value;
+      onChanged();
+      return this;
+    }
+
+    private com.google.cloud.dataproc.v1beta2.ClusterMetrics metrics_ = null;
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.ClusterMetrics, com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder, com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder> metricsBuilder_;
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
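+     * For illustration only (an editor's sketch, not generated output):
+     * because this message field may be absent, callers usually guard reads;
+     * `cluster` below is a placeholder for an already-obtained Cluster:
+     *
+     *     if (cluster.hasMetrics()) {
+     *       ClusterMetrics metrics = cluster.getMetrics();
+     *     }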
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public boolean hasMetrics() {
+      return metricsBuilder_ != null || metrics_ != null;
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterMetrics getMetrics() {
+      if (metricsBuilder_ == null) {
+        return metrics_ == null ? com.google.cloud.dataproc.v1beta2.ClusterMetrics.getDefaultInstance() : metrics_;
+      } else {
+        return metricsBuilder_.getMessage();
+      }
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public Builder setMetrics(com.google.cloud.dataproc.v1beta2.ClusterMetrics value) {
+      if (metricsBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        metrics_ = value;
+        onChanged();
+      } else {
+        metricsBuilder_.setMessage(value);
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public Builder setMetrics(
+        com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder builderForValue) {
+      if (metricsBuilder_ == null) {
+        metrics_ = builderForValue.build();
+        onChanged();
+      } else {
+        metricsBuilder_.setMessage(builderForValue.build());
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public Builder mergeMetrics(com.google.cloud.dataproc.v1beta2.ClusterMetrics value) {
+      if (metricsBuilder_ == null) {
+        if (metrics_ != null) {
+          metrics_ =
+            com.google.cloud.dataproc.v1beta2.ClusterMetrics.newBuilder(metrics_).mergeFrom(value).buildPartial();
+        } else {
+          metrics_ = value;
+        }
+        onChanged();
+      } else {
+        metricsBuilder_.mergeFrom(value);
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public Builder clearMetrics() {
+      if (metricsBuilder_ == null) {
+        metrics_ = null;
+        onChanged();
+      } else {
+        metrics_ = null;
+        metricsBuilder_ = null;
+      }
+
+      return this;
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder getMetricsBuilder() {
+
+      onChanged();
+      return getMetricsFieldBuilder().getBuilder();
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder getMetricsOrBuilder() {
+      if (metricsBuilder_ != null) {
+        return metricsBuilder_.getMessageOrBuilder();
+      } else {
+        return metrics_ == null ?
+            com.google.cloud.dataproc.v1beta2.ClusterMetrics.getDefaultInstance() : metrics_;
+      }
+    }
+    /**
+     *
+     * Contains cluster daemon metrics such as HDFS and YARN stats.
+     * **Beta Feature**: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.ClusterMetrics, com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder, com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder>
+        getMetricsFieldBuilder() {
+      if (metricsBuilder_ == null) {
+        metricsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.ClusterMetrics, com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder, com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder>(
+                getMetrics(),
+                getParentForChildren(),
+                isClean());
+        metrics_ = null;
+      }
+      return metricsBuilder_;
+    }
+    @java.lang.Override
+    public final Builder setUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFieldsProto3(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.Cluster)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.Cluster)
+  private static final com.google.cloud.dataproc.v1beta2.Cluster DEFAULT_INSTANCE;
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.Cluster();
+  }
+
+  public static com.google.cloud.dataproc.v1beta2.Cluster getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<Cluster>
+      PARSER = new com.google.protobuf.AbstractParser<Cluster>() {
+    @java.lang.Override
+    public Cluster parsePartialFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return new Cluster(input, extensionRegistry);
+    }
+  };
+
+  public static com.google.protobuf.Parser<Cluster> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<Cluster> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.dataproc.v1beta2.Cluster getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+
+}
+
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java
new file mode 100644
index 000000000000..7bc95dfcbc9e
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java
@@ -0,0 +1,2681 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/clusters.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+/**
+ *
+ * The cluster config.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterConfig} + */ +public final class ClusterConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ClusterConfig) + ClusterConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use ClusterConfig.newBuilder() to construct. + private ClusterConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClusterConfig() { + configBucket_ = ""; + initializationActions_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClusterConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + configBucket_ = s; + break; + } + case 66: { + com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder subBuilder = null; + if (gceClusterConfig_ != null) { + subBuilder = gceClusterConfig_.toBuilder(); + } + gceClusterConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.GceClusterConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(gceClusterConfig_); + gceClusterConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 74: { + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder subBuilder = null; + if (masterConfig_ != null) { + subBuilder = masterConfig_.toBuilder(); + } + masterConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(masterConfig_); + masterConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 82: { + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder subBuilder = null; + if (workerConfig_ != null) { + subBuilder = workerConfig_.toBuilder(); + } + workerConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(workerConfig_); + workerConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 90: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + initializationActions_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + initializationActions_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.NodeInitializationAction.parser(), extensionRegistry)); + break; + } + case 98: { + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder subBuilder = null; + if (secondaryWorkerConfig_ != null) { + subBuilder = secondaryWorkerConfig_.toBuilder(); + } + secondaryWorkerConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(secondaryWorkerConfig_); + secondaryWorkerConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 106: { + 
com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder subBuilder = null; + if (softwareConfig_ != null) { + subBuilder = softwareConfig_.toBuilder(); + } + softwareConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.SoftwareConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(softwareConfig_); + softwareConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 114: { + com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder subBuilder = null; + if (lifecycleConfig_ != null) { + subBuilder = lifecycleConfig_.toBuilder(); + } + lifecycleConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.LifecycleConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(lifecycleConfig_); + lifecycleConfig_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + initializationActions_ = java.util.Collections.unmodifiableList(initializationActions_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterConfig.class, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder.class); + } + + private int bitField0_; + public static final int CONFIG_BUCKET_FIELD_NUMBER = 1; + private volatile java.lang.Object configBucket_; + /** + *
+   * Optional. A Cloud Storage staging bucket used for sharing generated
+   * SSH keys and config. If you do not specify a staging bucket, Cloud
+   * Dataproc will determine an appropriate Cloud Storage location (US,
+   * ASIA, or EU) for your cluster's staging bucket according to the Google
+   * Compute Engine zone where your cluster is deployed, and then it will create
+   * and manage this project-level, per-location bucket for you.
+   * 
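+   * For illustration only (an editor's sketch, not generated output): omit
+   * this field to let Cloud Dataproc create and manage the bucket, or supply
+   * one explicitly (the bucket name below is a placeholder):
+   *
+   *     ClusterConfig config = ClusterConfig.newBuilder()
+   *         .setConfigBucket("my-staging-bucket")
+   *         .build();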
+   *
+   * string config_bucket = 1;
+   */
+  public java.lang.String getConfigBucket() {
+    java.lang.Object ref = configBucket_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      configBucket_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   * Optional. A Cloud Storage staging bucket used for sharing generated
+   * SSH keys and config. If you do not specify a staging bucket, Cloud
+   * Dataproc will determine an appropriate Cloud Storage location (US,
+   * ASIA, or EU) for your cluster's staging bucket according to the Google
+   * Compute Engine zone where your cluster is deployed, and then it will create
+   * and manage this project-level, per-location bucket for you.
+   * 
+   *
+   * string config_bucket = 1;
+   */
+  public com.google.protobuf.ByteString
+      getConfigBucketBytes() {
+    java.lang.Object ref = configBucket_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      configBucket_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int GCE_CLUSTER_CONFIG_FIELD_NUMBER = 8;
+  private com.google.cloud.dataproc.v1beta2.GceClusterConfig gceClusterConfig_;
+  /**
+   *
+   * Required. The shared Compute Engine config settings for
+   * all instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8;
+   */
+  public boolean hasGceClusterConfig() {
+    return gceClusterConfig_ != null;
+  }
+  /**
+   *
+   * Required. The shared Compute Engine config settings for
+   * all instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8;
+   */
+  public com.google.cloud.dataproc.v1beta2.GceClusterConfig getGceClusterConfig() {
+    return gceClusterConfig_ == null ? com.google.cloud.dataproc.v1beta2.GceClusterConfig.getDefaultInstance() : gceClusterConfig_;
+  }
+  /**
+   *
+   * Required. The shared Compute Engine config settings for
+   * all instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8;
+   */
+  public com.google.cloud.dataproc.v1beta2.GceClusterConfigOrBuilder getGceClusterConfigOrBuilder() {
+    return getGceClusterConfig();
+  }
+
+  public static final int MASTER_CONFIG_FIELD_NUMBER = 9;
+  private com.google.cloud.dataproc.v1beta2.InstanceGroupConfig masterConfig_;
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * the master instance in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9;
+   */
+  public boolean hasMasterConfig() {
+    return masterConfig_ != null;
+  }
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * the master instance in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9;
+   */
+  public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getMasterConfig() {
+    return masterConfig_ == null ? com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : masterConfig_;
+  }
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * the master instance in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9;
+   */
+  public com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getMasterConfigOrBuilder() {
+    return getMasterConfig();
+  }
+
+  public static final int WORKER_CONFIG_FIELD_NUMBER = 10;
+  private com.google.cloud.dataproc.v1beta2.InstanceGroupConfig workerConfig_;
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * worker instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10;
+   */
+  public boolean hasWorkerConfig() {
+    return workerConfig_ != null;
+  }
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * worker instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10;
+   */
+  public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getWorkerConfig() {
+    return workerConfig_ == null ? com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : workerConfig_;
+  }
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * worker instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10;
+   */
+  public com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getWorkerConfigOrBuilder() {
+    return getWorkerConfig();
+  }
+
+  public static final int SECONDARY_WORKER_CONFIG_FIELD_NUMBER = 12;
+  private com.google.cloud.dataproc.v1beta2.InstanceGroupConfig secondaryWorkerConfig_;
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * additional worker instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12;
+   */
+  public boolean hasSecondaryWorkerConfig() {
+    return secondaryWorkerConfig_ != null;
+  }
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * additional worker instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12;
+   */
+  public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getSecondaryWorkerConfig() {
+    return secondaryWorkerConfig_ == null ? com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : secondaryWorkerConfig_;
+  }
+  /**
+   *
+   * Optional. The Compute Engine config settings for
+   * additional worker instances in a cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12;
+   */
+  public com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getSecondaryWorkerConfigOrBuilder() {
+    return getSecondaryWorkerConfig();
+  }
+
+  public static final int SOFTWARE_CONFIG_FIELD_NUMBER = 13;
+  private com.google.cloud.dataproc.v1beta2.SoftwareConfig softwareConfig_;
+  /**
+   *
+   * Optional. The config settings for software inside the cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13;
+   */
+  public boolean hasSoftwareConfig() {
+    return softwareConfig_ != null;
+  }
+  /**
+   *
+   * Optional. The config settings for software inside the cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13;
+   */
+  public com.google.cloud.dataproc.v1beta2.SoftwareConfig getSoftwareConfig() {
+    return softwareConfig_ == null ? com.google.cloud.dataproc.v1beta2.SoftwareConfig.getDefaultInstance() : softwareConfig_;
+  }
+  /**
+   *
+   * Optional. The config settings for software inside the cluster.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13;
+   */
+  public com.google.cloud.dataproc.v1beta2.SoftwareConfigOrBuilder getSoftwareConfigOrBuilder() {
+    return getSoftwareConfig();
+  }
+
+  public static final int LIFECYCLE_CONFIG_FIELD_NUMBER = 14;
+  private com.google.cloud.dataproc.v1beta2.LifecycleConfig lifecycleConfig_;
+  /**
+   *
+   * Optional. The config settings for the cluster's auto-delete schedule.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14;
+   */
+  public boolean hasLifecycleConfig() {
+    return lifecycleConfig_ != null;
+  }
+  /**
+   *
+   * Optional. The config settings for the cluster's auto-delete schedule.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14;
+   */
+  public com.google.cloud.dataproc.v1beta2.LifecycleConfig getLifecycleConfig() {
+    return lifecycleConfig_ == null ? com.google.cloud.dataproc.v1beta2.LifecycleConfig.getDefaultInstance() : lifecycleConfig_;
+  }
+  /**
+   *
+   * Optional. The config settings for the cluster's auto-delete schedule.
+   * 
+   *
+   * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14;
+   */
+  public com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder getLifecycleConfigOrBuilder() {
+    return getLifecycleConfig();
+  }
+
+  public static final int INITIALIZATION_ACTIONS_FIELD_NUMBER = 11;
+  private java.util.List<com.google.cloud.dataproc.v1beta2.NodeInitializationAction> initializationActions_;
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
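+   * For illustration only (an editor's sketch, not generated output): an
+   * action is added as a NodeInitializationAction whose executable lives in
+   * Cloud Storage (the gs:// path below is a placeholder):
+   *
+   *     ClusterConfig config = ClusterConfig.newBuilder()
+   *         .addInitializationActions(NodeInitializationAction.newBuilder()
+   *             .setExecutableFile("gs://my-bucket/my-init-action.sh"))
+   *         .build();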
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  public java.util.List<com.google.cloud.dataproc.v1beta2.NodeInitializationAction> getInitializationActionsList() {
+    return initializationActions_;
+  }
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  public java.util.List<? extends com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder>
+      getInitializationActionsOrBuilderList() {
+    return initializationActions_;
+  }
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  public int getInitializationActionsCount() {
+    return initializationActions_.size();
+  }
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  public com.google.cloud.dataproc.v1beta2.NodeInitializationAction getInitializationActions(int index) {
+    return initializationActions_.get(index);
+  }
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder getInitializationActionsOrBuilder( + int index) { + return initializationActions_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getConfigBucketBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, configBucket_); + } + if (gceClusterConfig_ != null) { + output.writeMessage(8, getGceClusterConfig()); + } + if (masterConfig_ != null) { + output.writeMessage(9, getMasterConfig()); + } + if (workerConfig_ != null) { + output.writeMessage(10, getWorkerConfig()); + } + for (int i = 0; i < initializationActions_.size(); i++) { + output.writeMessage(11, initializationActions_.get(i)); + } + if (secondaryWorkerConfig_ != null) { + output.writeMessage(12, getSecondaryWorkerConfig()); + } + if (softwareConfig_ != null) { + output.writeMessage(13, getSoftwareConfig()); + } + if (lifecycleConfig_ != null) { + output.writeMessage(14, getLifecycleConfig()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getConfigBucketBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, configBucket_); + } + if (gceClusterConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, getGceClusterConfig()); + } + if (masterConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, getMasterConfig()); + } + if (workerConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, getWorkerConfig()); + } + for (int i = 0; i < initializationActions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(11, initializationActions_.get(i)); + } + if (secondaryWorkerConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(12, getSecondaryWorkerConfig()); + } + if (softwareConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(13, getSoftwareConfig()); + } + if (lifecycleConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(14, getLifecycleConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ClusterConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ClusterConfig other = (com.google.cloud.dataproc.v1beta2.ClusterConfig) obj; + + boolean result = true; + result = result && getConfigBucket() + .equals(other.getConfigBucket()); + result = result && (hasGceClusterConfig() == other.hasGceClusterConfig()); + if (hasGceClusterConfig()) { + result = result && getGceClusterConfig() + .equals(other.getGceClusterConfig()); + } + result = result && (hasMasterConfig() == other.hasMasterConfig()); + if (hasMasterConfig()) { 
+ result = result && getMasterConfig() + .equals(other.getMasterConfig()); + } + result = result && (hasWorkerConfig() == other.hasWorkerConfig()); + if (hasWorkerConfig()) { + result = result && getWorkerConfig() + .equals(other.getWorkerConfig()); + } + result = result && (hasSecondaryWorkerConfig() == other.hasSecondaryWorkerConfig()); + if (hasSecondaryWorkerConfig()) { + result = result && getSecondaryWorkerConfig() + .equals(other.getSecondaryWorkerConfig()); + } + result = result && (hasSoftwareConfig() == other.hasSoftwareConfig()); + if (hasSoftwareConfig()) { + result = result && getSoftwareConfig() + .equals(other.getSoftwareConfig()); + } + result = result && (hasLifecycleConfig() == other.hasLifecycleConfig()); + if (hasLifecycleConfig()) { + result = result && getLifecycleConfig() + .equals(other.getLifecycleConfig()); + } + result = result && getInitializationActionsList() + .equals(other.getInitializationActionsList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CONFIG_BUCKET_FIELD_NUMBER; + hash = (53 * hash) + getConfigBucket().hashCode(); + if (hasGceClusterConfig()) { + hash = (37 * hash) + GCE_CLUSTER_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getGceClusterConfig().hashCode(); + } + if (hasMasterConfig()) { + hash = (37 * hash) + MASTER_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getMasterConfig().hashCode(); + } + if (hasWorkerConfig()) { + hash = (37 * hash) + WORKER_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getWorkerConfig().hashCode(); + } + if (hasSecondaryWorkerConfig()) { + hash = (37 * hash) + SECONDARY_WORKER_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getSecondaryWorkerConfig().hashCode(); + } + if (hasSoftwareConfig()) { + hash = (37 * hash) + SOFTWARE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getSoftwareConfig().hashCode(); + } + if (hasLifecycleConfig()) { + hash = (37 * hash) + LIFECYCLE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLifecycleConfig().hashCode(); + } + if (getInitializationActionsCount() > 0) { + hash = (37 * hash) + INITIALIZATION_ACTIONS_FIELD_NUMBER; + hash = (53 * hash) + getInitializationActionsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ClusterConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The cluster config.
+   *
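+   * A minimal usage sketch (hedged: the bucket name is illustrative, and
+   * setNumInstances assumes InstanceGroupConfig's num_instances field):
+   *     ClusterConfig config = ClusterConfig.newBuilder()
+   *         .setConfigBucket("my-staging-bucket")
+   *         .setWorkerConfig(InstanceGroupConfig.newBuilder()
+   *             .setNumInstances(2))
+   *         .build();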
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ClusterConfig) + com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterConfig.class, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ClusterConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getInitializationActionsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + configBucket_ = ""; + + if (gceClusterConfigBuilder_ == null) { + gceClusterConfig_ = null; + } else { + gceClusterConfig_ = null; + gceClusterConfigBuilder_ = null; + } + if (masterConfigBuilder_ == null) { + masterConfig_ = null; + } else { + masterConfig_ = null; + masterConfigBuilder_ = null; + } + if (workerConfigBuilder_ == null) { + workerConfig_ = null; + } else { + workerConfig_ = null; + workerConfigBuilder_ = null; + } + if (secondaryWorkerConfigBuilder_ == null) { + secondaryWorkerConfig_ = null; + } else { + secondaryWorkerConfig_ = null; + secondaryWorkerConfigBuilder_ = null; + } + if (softwareConfigBuilder_ == null) { + softwareConfig_ = null; + } else { + softwareConfig_ = null; + softwareConfigBuilder_ = null; + } + if (lifecycleConfigBuilder_ == null) { + lifecycleConfig_ = null; + } else { + lifecycleConfig_ = null; + lifecycleConfigBuilder_ = null; + } + if (initializationActionsBuilder_ == null) { + initializationActions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + initializationActionsBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterConfig build() { + com.google.cloud.dataproc.v1beta2.ClusterConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.ClusterConfig result = new com.google.cloud.dataproc.v1beta2.ClusterConfig(this); + 
int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.configBucket_ = configBucket_; + if (gceClusterConfigBuilder_ == null) { + result.gceClusterConfig_ = gceClusterConfig_; + } else { + result.gceClusterConfig_ = gceClusterConfigBuilder_.build(); + } + if (masterConfigBuilder_ == null) { + result.masterConfig_ = masterConfig_; + } else { + result.masterConfig_ = masterConfigBuilder_.build(); + } + if (workerConfigBuilder_ == null) { + result.workerConfig_ = workerConfig_; + } else { + result.workerConfig_ = workerConfigBuilder_.build(); + } + if (secondaryWorkerConfigBuilder_ == null) { + result.secondaryWorkerConfig_ = secondaryWorkerConfig_; + } else { + result.secondaryWorkerConfig_ = secondaryWorkerConfigBuilder_.build(); + } + if (softwareConfigBuilder_ == null) { + result.softwareConfig_ = softwareConfig_; + } else { + result.softwareConfig_ = softwareConfigBuilder_.build(); + } + if (lifecycleConfigBuilder_ == null) { + result.lifecycleConfig_ = lifecycleConfig_; + } else { + result.lifecycleConfig_ = lifecycleConfigBuilder_.build(); + } + if (initializationActionsBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + initializationActions_ = java.util.Collections.unmodifiableList(initializationActions_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.initializationActions_ = initializationActions_; + } else { + result.initializationActions_ = initializationActionsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ClusterConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ClusterConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance()) return this; + if (!other.getConfigBucket().isEmpty()) { + configBucket_ = other.configBucket_; + onChanged(); + } + if (other.hasGceClusterConfig()) { + mergeGceClusterConfig(other.getGceClusterConfig()); + } + if (other.hasMasterConfig()) { + mergeMasterConfig(other.getMasterConfig()); + } + if (other.hasWorkerConfig()) { + mergeWorkerConfig(other.getWorkerConfig()); + } + if (other.hasSecondaryWorkerConfig()) { + mergeSecondaryWorkerConfig(other.getSecondaryWorkerConfig()); + } + if (other.hasSoftwareConfig()) { + 
mergeSoftwareConfig(other.getSoftwareConfig()); + } + if (other.hasLifecycleConfig()) { + mergeLifecycleConfig(other.getLifecycleConfig()); + } + if (initializationActionsBuilder_ == null) { + if (!other.initializationActions_.isEmpty()) { + if (initializationActions_.isEmpty()) { + initializationActions_ = other.initializationActions_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureInitializationActionsIsMutable(); + initializationActions_.addAll(other.initializationActions_); + } + onChanged(); + } + } else { + if (!other.initializationActions_.isEmpty()) { + if (initializationActionsBuilder_.isEmpty()) { + initializationActionsBuilder_.dispose(); + initializationActionsBuilder_ = null; + initializationActions_ = other.initializationActions_; + bitField0_ = (bitField0_ & ~0x00000080); + initializationActionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getInitializationActionsFieldBuilder() : null; + } else { + initializationActionsBuilder_.addAllMessages(other.initializationActions_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ClusterConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ClusterConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object configBucket_ = ""; + /** + *
+     * Optional. A Cloud Storage staging bucket used for sharing generated
+     * SSH keys and config. If you do not specify a staging bucket, Cloud
+     * Dataproc will determine an appropriate Cloud Storage location (US,
+     * ASIA, or EU) for your cluster's staging bucket according to the Google
+     * Compute Engine zone where your cluster is deployed, and then it will create
+     * and manage this project-level, per-location bucket for you.
+     *
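+     * For example, to pin the staging bucket rather than let Cloud Dataproc
+     * choose one (a sketch; the bucket name is hypothetical):
+     *     ClusterConfig.newBuilder()
+     *         .setConfigBucket("my-dataproc-staging-bucket")
+     *         .build();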
+ * + * string config_bucket = 1; + */ + public java.lang.String getConfigBucket() { + java.lang.Object ref = configBucket_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + configBucket_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A Cloud Storage staging bucket used for sharing generated
+     * SSH keys and config. If you do not specify a staging bucket, Cloud
+     * Dataproc will determine an appropriate Cloud Storage location (US,
+     * ASIA, or EU) for your cluster's staging bucket according to the Google
+     * Compute Engine zone where your cluster is deployed, and then it will create
+     * and manage this project-level, per-location bucket for you.
+     *
+ * + * string config_bucket = 1; + */ + public com.google.protobuf.ByteString + getConfigBucketBytes() { + java.lang.Object ref = configBucket_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + configBucket_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A Cloud Storage staging bucket used for sharing generated
+     * SSH keys and config. If you do not specify a staging bucket, Cloud
+     * Dataproc will determine an appropriate Cloud Storage location (US,
+     * ASIA, or EU) for your cluster's staging bucket according to the Google
+     * Compute Engine zone where your cluster is deployed, and then it will create
+     * and manage this project-level, per-location bucket for you.
+     *
+ * + * string config_bucket = 1; + */ + public Builder setConfigBucket( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + configBucket_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A Cloud Storage staging bucket used for sharing generated
+     * SSH keys and config. If you do not specify a staging bucket, Cloud
+     * Dataproc will determine an appropriate Cloud Storage location (US,
+     * ASIA, or EU) for your cluster's staging bucket according to the Google
+     * Compute Engine zone where your cluster is deployed, and then it will create
+     * and manage this project-level, per-location bucket for you.
+     *
+ * + * string config_bucket = 1; + */ + public Builder clearConfigBucket() { + + configBucket_ = getDefaultInstance().getConfigBucket(); + onChanged(); + return this; + } + /** + *
+     * Optional. A Cloud Storage staging bucket used for sharing generated
+     * SSH keys and config. If you do not specify a staging bucket, Cloud
+     * Dataproc will determine an appropriate Cloud Storage location (US,
+     * ASIA, or EU) for your cluster's staging bucket according to the Google
+     * Compute Engine zone where your cluster is deployed, and then it will create
+     * and manage this project-level, per-location bucket for you.
+     *
+ * + * string config_bucket = 1; + */ + public Builder setConfigBucketBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + configBucket_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.GceClusterConfig gceClusterConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.GceClusterConfig, com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.GceClusterConfigOrBuilder> gceClusterConfigBuilder_; + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
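+     * A sketch (assumes GceClusterConfig's zone_uri field; the zone value is
+     * illustrative):
+     *     ClusterConfig.newBuilder()
+     *         .setGceClusterConfig(GceClusterConfig.newBuilder()
+     *             .setZoneUri("us-central1-a"))
+     *         .build();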
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public boolean hasGceClusterConfig() { + return gceClusterConfigBuilder_ != null || gceClusterConfig_ != null; + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.GceClusterConfig getGceClusterConfig() { + if (gceClusterConfigBuilder_ == null) { + return gceClusterConfig_ == null ? com.google.cloud.dataproc.v1beta2.GceClusterConfig.getDefaultInstance() : gceClusterConfig_; + } else { + return gceClusterConfigBuilder_.getMessage(); + } + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public Builder setGceClusterConfig(com.google.cloud.dataproc.v1beta2.GceClusterConfig value) { + if (gceClusterConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + gceClusterConfig_ = value; + onChanged(); + } else { + gceClusterConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public Builder setGceClusterConfig( + com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder builderForValue) { + if (gceClusterConfigBuilder_ == null) { + gceClusterConfig_ = builderForValue.build(); + onChanged(); + } else { + gceClusterConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public Builder mergeGceClusterConfig(com.google.cloud.dataproc.v1beta2.GceClusterConfig value) { + if (gceClusterConfigBuilder_ == null) { + if (gceClusterConfig_ != null) { + gceClusterConfig_ = + com.google.cloud.dataproc.v1beta2.GceClusterConfig.newBuilder(gceClusterConfig_).mergeFrom(value).buildPartial(); + } else { + gceClusterConfig_ = value; + } + onChanged(); + } else { + gceClusterConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public Builder clearGceClusterConfig() { + if (gceClusterConfigBuilder_ == null) { + gceClusterConfig_ = null; + onChanged(); + } else { + gceClusterConfig_ = null; + gceClusterConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder getGceClusterConfigBuilder() { + + onChanged(); + return getGceClusterConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.GceClusterConfigOrBuilder getGceClusterConfigOrBuilder() { + if (gceClusterConfigBuilder_ != null) { + return gceClusterConfigBuilder_.getMessageOrBuilder(); + } else { + return gceClusterConfig_ == null ? + com.google.cloud.dataproc.v1beta2.GceClusterConfig.getDefaultInstance() : gceClusterConfig_; + } + } + /** + *
+     * Required. The shared Compute Engine config settings for
+     * all instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.GceClusterConfig, com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.GceClusterConfigOrBuilder> + getGceClusterConfigFieldBuilder() { + if (gceClusterConfigBuilder_ == null) { + gceClusterConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.GceClusterConfig, com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.GceClusterConfigOrBuilder>( + getGceClusterConfig(), + getParentForChildren(), + isClean()); + gceClusterConfig_ = null; + } + return gceClusterConfigBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.InstanceGroupConfig masterConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder> masterConfigBuilder_; + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
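+     * A sketch (assumes InstanceGroupConfig's num_instances and
+     * machine_type_uri fields; the values are illustrative):
+     *     ClusterConfig.newBuilder()
+     *         .setMasterConfig(InstanceGroupConfig.newBuilder()
+     *             .setNumInstances(1)
+     *             .setMachineTypeUri("n1-standard-4"))
+     *         .build();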
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public boolean hasMasterConfig() { + return masterConfigBuilder_ != null || masterConfig_ != null; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getMasterConfig() { + if (masterConfigBuilder_ == null) { + return masterConfig_ == null ? com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : masterConfig_; + } else { + return masterConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public Builder setMasterConfig(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig value) { + if (masterConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + masterConfig_ = value; + onChanged(); + } else { + masterConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public Builder setMasterConfig( + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder builderForValue) { + if (masterConfigBuilder_ == null) { + masterConfig_ = builderForValue.build(); + onChanged(); + } else { + masterConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public Builder mergeMasterConfig(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig value) { + if (masterConfigBuilder_ == null) { + if (masterConfig_ != null) { + masterConfig_ = + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.newBuilder(masterConfig_).mergeFrom(value).buildPartial(); + } else { + masterConfig_ = value; + } + onChanged(); + } else { + masterConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public Builder clearMasterConfig() { + if (masterConfigBuilder_ == null) { + masterConfig_ = null; + onChanged(); + } else { + masterConfig_ = null; + masterConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder getMasterConfigBuilder() { + + onChanged(); + return getMasterConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getMasterConfigOrBuilder() { + if (masterConfigBuilder_ != null) { + return masterConfigBuilder_.getMessageOrBuilder(); + } else { + return masterConfig_ == null ? + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : masterConfig_; + } + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * the master instance in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder> + getMasterConfigFieldBuilder() { + if (masterConfigBuilder_ == null) { + masterConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder>( + getMasterConfig(), + getParentForChildren(), + isClean()); + masterConfig_ = null; + } + return masterConfigBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.InstanceGroupConfig workerConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder> workerConfigBuilder_; + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
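+     * A sketch (assumes InstanceGroupConfig's num_instances field):
+     *     ClusterConfig.newBuilder()
+     *         .setWorkerConfig(InstanceGroupConfig.newBuilder()
+     *             .setNumInstances(2))
+     *         .build();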
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public boolean hasWorkerConfig() { + return workerConfigBuilder_ != null || workerConfig_ != null; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getWorkerConfig() { + if (workerConfigBuilder_ == null) { + return workerConfig_ == null ? com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : workerConfig_; + } else { + return workerConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public Builder setWorkerConfig(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig value) { + if (workerConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + workerConfig_ = value; + onChanged(); + } else { + workerConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public Builder setWorkerConfig( + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder builderForValue) { + if (workerConfigBuilder_ == null) { + workerConfig_ = builderForValue.build(); + onChanged(); + } else { + workerConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public Builder mergeWorkerConfig(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig value) { + if (workerConfigBuilder_ == null) { + if (workerConfig_ != null) { + workerConfig_ = + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.newBuilder(workerConfig_).mergeFrom(value).buildPartial(); + } else { + workerConfig_ = value; + } + onChanged(); + } else { + workerConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public Builder clearWorkerConfig() { + if (workerConfigBuilder_ == null) { + workerConfig_ = null; + onChanged(); + } else { + workerConfig_ = null; + workerConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder getWorkerConfigBuilder() { + + onChanged(); + return getWorkerConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getWorkerConfigOrBuilder() { + if (workerConfigBuilder_ != null) { + return workerConfigBuilder_.getMessageOrBuilder(); + } else { + return workerConfig_ == null ? + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : workerConfig_; + } + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder> + getWorkerConfigFieldBuilder() { + if (workerConfigBuilder_ == null) { + workerConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder>( + getWorkerConfig(), + getParentForChildren(), + isClean()); + workerConfig_ = null; + } + return workerConfigBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.InstanceGroupConfig secondaryWorkerConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder> secondaryWorkerConfigBuilder_; + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
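+     * A sketch (assumes InstanceGroupConfig's num_instances field; secondary
+     * workers are sized independently of the primary worker group):
+     *     ClusterConfig.newBuilder()
+     *         .setSecondaryWorkerConfig(InstanceGroupConfig.newBuilder()
+     *             .setNumInstances(4))
+     *         .build();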
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public boolean hasSecondaryWorkerConfig() { + return secondaryWorkerConfigBuilder_ != null || secondaryWorkerConfig_ != null; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getSecondaryWorkerConfig() { + if (secondaryWorkerConfigBuilder_ == null) { + return secondaryWorkerConfig_ == null ? com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : secondaryWorkerConfig_; + } else { + return secondaryWorkerConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public Builder setSecondaryWorkerConfig(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig value) { + if (secondaryWorkerConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + secondaryWorkerConfig_ = value; + onChanged(); + } else { + secondaryWorkerConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public Builder setSecondaryWorkerConfig( + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder builderForValue) { + if (secondaryWorkerConfigBuilder_ == null) { + secondaryWorkerConfig_ = builderForValue.build(); + onChanged(); + } else { + secondaryWorkerConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public Builder mergeSecondaryWorkerConfig(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig value) { + if (secondaryWorkerConfigBuilder_ == null) { + if (secondaryWorkerConfig_ != null) { + secondaryWorkerConfig_ = + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.newBuilder(secondaryWorkerConfig_).mergeFrom(value).buildPartial(); + } else { + secondaryWorkerConfig_ = value; + } + onChanged(); + } else { + secondaryWorkerConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public Builder clearSecondaryWorkerConfig() { + if (secondaryWorkerConfigBuilder_ == null) { + secondaryWorkerConfig_ = null; + onChanged(); + } else { + secondaryWorkerConfig_ = null; + secondaryWorkerConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder getSecondaryWorkerConfigBuilder() { + + onChanged(); + return getSecondaryWorkerConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getSecondaryWorkerConfigOrBuilder() { + if (secondaryWorkerConfigBuilder_ != null) { + return secondaryWorkerConfigBuilder_.getMessageOrBuilder(); + } else { + return secondaryWorkerConfig_ == null ? + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance() : secondaryWorkerConfig_; + } + } + /** + *
+     * Optional. The Compute Engine config settings for
+     * additional worker instances in a cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder> + getSecondaryWorkerConfigFieldBuilder() { + if (secondaryWorkerConfigBuilder_ == null) { + secondaryWorkerConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder>( + getSecondaryWorkerConfig(), + getParentForChildren(), + isClean()); + secondaryWorkerConfig_ = null; + } + return secondaryWorkerConfigBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.SoftwareConfig softwareConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SoftwareConfig, com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder, com.google.cloud.dataproc.v1beta2.SoftwareConfigOrBuilder> softwareConfigBuilder_; + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
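+     * A sketch (assumes SoftwareConfig's image_version field; the version
+     * string is illustrative):
+     *     ClusterConfig.newBuilder()
+     *         .setSoftwareConfig(SoftwareConfig.newBuilder()
+     *             .setImageVersion("1.3"))
+     *         .build();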
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public boolean hasSoftwareConfig() { + return softwareConfigBuilder_ != null || softwareConfig_ != null; + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public com.google.cloud.dataproc.v1beta2.SoftwareConfig getSoftwareConfig() { + if (softwareConfigBuilder_ == null) { + return softwareConfig_ == null ? com.google.cloud.dataproc.v1beta2.SoftwareConfig.getDefaultInstance() : softwareConfig_; + } else { + return softwareConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public Builder setSoftwareConfig(com.google.cloud.dataproc.v1beta2.SoftwareConfig value) { + if (softwareConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + softwareConfig_ = value; + onChanged(); + } else { + softwareConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public Builder setSoftwareConfig( + com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder builderForValue) { + if (softwareConfigBuilder_ == null) { + softwareConfig_ = builderForValue.build(); + onChanged(); + } else { + softwareConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public Builder mergeSoftwareConfig(com.google.cloud.dataproc.v1beta2.SoftwareConfig value) { + if (softwareConfigBuilder_ == null) { + if (softwareConfig_ != null) { + softwareConfig_ = + com.google.cloud.dataproc.v1beta2.SoftwareConfig.newBuilder(softwareConfig_).mergeFrom(value).buildPartial(); + } else { + softwareConfig_ = value; + } + onChanged(); + } else { + softwareConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public Builder clearSoftwareConfig() { + if (softwareConfigBuilder_ == null) { + softwareConfig_ = null; + onChanged(); + } else { + softwareConfig_ = null; + softwareConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder getSoftwareConfigBuilder() { + + onChanged(); + return getSoftwareConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + public com.google.cloud.dataproc.v1beta2.SoftwareConfigOrBuilder getSoftwareConfigOrBuilder() { + if (softwareConfigBuilder_ != null) { + return softwareConfigBuilder_.getMessageOrBuilder(); + } else { + return softwareConfig_ == null ? + com.google.cloud.dataproc.v1beta2.SoftwareConfig.getDefaultInstance() : softwareConfig_; + } + } + /** + *
+     * Optional. The config settings for software inside the cluster.
+     *
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SoftwareConfig, com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder, com.google.cloud.dataproc.v1beta2.SoftwareConfigOrBuilder> + getSoftwareConfigFieldBuilder() { + if (softwareConfigBuilder_ == null) { + softwareConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SoftwareConfig, com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder, com.google.cloud.dataproc.v1beta2.SoftwareConfigOrBuilder>( + getSoftwareConfig(), + getParentForChildren(), + isClean()); + softwareConfig_ = null; + } + return softwareConfigBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.LifecycleConfig lifecycleConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LifecycleConfig, com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder, com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder> lifecycleConfigBuilder_; + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
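+     * A sketch (assumes LifecycleConfig's idle_delete_ttl field, a
+     * google.protobuf.Duration; the 30-minute TTL is illustrative):
+     *     ClusterConfig.newBuilder()
+     *         .setLifecycleConfig(LifecycleConfig.newBuilder()
+     *             .setIdleDeleteTtl(Duration.newBuilder().setSeconds(1800)))
+     *         .build();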
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public boolean hasLifecycleConfig() { + return lifecycleConfigBuilder_ != null || lifecycleConfig_ != null; + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public com.google.cloud.dataproc.v1beta2.LifecycleConfig getLifecycleConfig() { + if (lifecycleConfigBuilder_ == null) { + return lifecycleConfig_ == null ? com.google.cloud.dataproc.v1beta2.LifecycleConfig.getDefaultInstance() : lifecycleConfig_; + } else { + return lifecycleConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public Builder setLifecycleConfig(com.google.cloud.dataproc.v1beta2.LifecycleConfig value) { + if (lifecycleConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lifecycleConfig_ = value; + onChanged(); + } else { + lifecycleConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public Builder setLifecycleConfig( + com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder builderForValue) { + if (lifecycleConfigBuilder_ == null) { + lifecycleConfig_ = builderForValue.build(); + onChanged(); + } else { + lifecycleConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public Builder mergeLifecycleConfig(com.google.cloud.dataproc.v1beta2.LifecycleConfig value) { + if (lifecycleConfigBuilder_ == null) { + if (lifecycleConfig_ != null) { + lifecycleConfig_ = + com.google.cloud.dataproc.v1beta2.LifecycleConfig.newBuilder(lifecycleConfig_).mergeFrom(value).buildPartial(); + } else { + lifecycleConfig_ = value; + } + onChanged(); + } else { + lifecycleConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public Builder clearLifecycleConfig() { + if (lifecycleConfigBuilder_ == null) { + lifecycleConfig_ = null; + onChanged(); + } else { + lifecycleConfig_ = null; + lifecycleConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder getLifecycleConfigBuilder() { + + onChanged(); + return getLifecycleConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + public com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder getLifecycleConfigOrBuilder() { + if (lifecycleConfigBuilder_ != null) { + return lifecycleConfigBuilder_.getMessageOrBuilder(); + } else { + return lifecycleConfig_ == null ? + com.google.cloud.dataproc.v1beta2.LifecycleConfig.getDefaultInstance() : lifecycleConfig_; + } + } + /** + *
+     * Optional. The config settings for the cluster's auto-deletion schedule.
+     *
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LifecycleConfig, com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder, com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder> + getLifecycleConfigFieldBuilder() { + if (lifecycleConfigBuilder_ == null) { + lifecycleConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LifecycleConfig, com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder, com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder>( + getLifecycleConfig(), + getParentForChildren(), + isClean()); + lifecycleConfig_ = null; + } + return lifecycleConfigBuilder_; + } + + private java.util.List initializationActions_ = + java.util.Collections.emptyList(); + private void ensureInitializationActionsIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + initializationActions_ = new java.util.ArrayList(initializationActions_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.NodeInitializationAction, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder, com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder> initializationActionsBuilder_; + + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
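+     * A sketch (assumes NodeInitializationAction's executable_file field; the
+     * gs:// path is hypothetical):
+     *     ClusterConfig.newBuilder()
+     *         .addInitializationActions(NodeInitializationAction.newBuilder()
+     *             .setExecutableFile("gs://my-bucket/my-init.sh"))
+     *         .build();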
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public java.util.List getInitializationActionsList() { + if (initializationActionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(initializationActions_); + } else { + return initializationActionsBuilder_.getMessageList(); + } + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public int getInitializationActionsCount() { + if (initializationActionsBuilder_ == null) { + return initializationActions_.size(); + } else { + return initializationActionsBuilder_.getCount(); + } + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public com.google.cloud.dataproc.v1beta2.NodeInitializationAction getInitializationActions(int index) { + if (initializationActionsBuilder_ == null) { + return initializationActions_.get(index); + } else { + return initializationActionsBuilder_.getMessage(index); + } + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder setInitializationActions( + int index, com.google.cloud.dataproc.v1beta2.NodeInitializationAction value) { + if (initializationActionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInitializationActionsIsMutable(); + initializationActions_.set(index, value); + onChanged(); + } else { + initializationActionsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder setInitializationActions( + int index, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder builderForValue) { + if (initializationActionsBuilder_ == null) { + ensureInitializationActionsIsMutable(); + initializationActions_.set(index, builderForValue.build()); + onChanged(); + } else { + initializationActionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder addInitializationActions(com.google.cloud.dataproc.v1beta2.NodeInitializationAction value) { + if (initializationActionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInitializationActionsIsMutable(); + initializationActions_.add(value); + onChanged(); + } else { + initializationActionsBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder addInitializationActions( + int index, com.google.cloud.dataproc.v1beta2.NodeInitializationAction value) { + if (initializationActionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInitializationActionsIsMutable(); + initializationActions_.add(index, value); + onChanged(); + } else { + initializationActionsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder addInitializationActions( + com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder builderForValue) { + if (initializationActionsBuilder_ == null) { + ensureInitializationActionsIsMutable(); + initializationActions_.add(builderForValue.build()); + onChanged(); + } else { + initializationActionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder addInitializationActions( + int index, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder builderForValue) { + if (initializationActionsBuilder_ == null) { + ensureInitializationActionsIsMutable(); + initializationActions_.add(index, builderForValue.build()); + onChanged(); + } else { + initializationActionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder addAllInitializationActions( + java.lang.Iterable values) { + if (initializationActionsBuilder_ == null) { + ensureInitializationActionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, initializationActions_); + onChanged(); + } else { + initializationActionsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder clearInitializationActions() { + if (initializationActionsBuilder_ == null) { + initializationActions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + initializationActionsBuilder_.clear(); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + public Builder removeInitializationActions(int index) { + if (initializationActionsBuilder_ == null) { + ensureInitializationActionsIsMutable(); + initializationActions_.remove(index); + onChanged(); + } else { + initializationActionsBuilder_.remove(index); + } + return this; + } + /** + *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+     */
+    public com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder getInitializationActionsBuilder(
+        int index) {
+      return getInitializationActionsFieldBuilder().getBuilder(index);
+    }
+    /**
+     *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+     */
+    public com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder getInitializationActionsOrBuilder(
+        int index) {
+      if (initializationActionsBuilder_ == null) {
+        return initializationActions_.get(index);
+      } else {
+        return initializationActionsBuilder_.getMessageOrBuilder(index);
+      }
+    }
+    /**
+     *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+     */
+    public java.util.List<? extends com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder>
+        getInitializationActionsOrBuilderList() {
+      if (initializationActionsBuilder_ != null) {
+        return initializationActionsBuilder_.getMessageOrBuilderList();
+      } else {
+        return java.util.Collections.unmodifiableList(initializationActions_);
+      }
+    }
+    /**
+     *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+     */
+    public com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder addInitializationActionsBuilder() {
+      return getInitializationActionsFieldBuilder().addBuilder(
+          com.google.cloud.dataproc.v1beta2.NodeInitializationAction.getDefaultInstance());
+    }
+    /**
+     *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+     */
+    public com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder addInitializationActionsBuilder(
+        int index) {
+      return getInitializationActionsFieldBuilder().addBuilder(
+          index, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.getDefaultInstance());
+    }
+    /**
+     *
+     * Optional. Commands to execute on each node after config is
+     * completed. By default, executables are run on master and all worker nodes.
+     * You can test a node's <code>role</code> metadata to run an executable on
+     * a master or worker node, as shown below using `curl` (you can also use `wget`):
+     *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+     *     if [[ "${ROLE}" == 'Master' ]]; then
+     *       ... master specific actions ...
+     *     else
+     *       ... worker specific actions ...
+     *     fi
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+     */
+    public java.util.List<com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder>
+        getInitializationActionsBuilderList() {
+      return getInitializationActionsFieldBuilder().getBuilderList();
+    }
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.NodeInitializationAction, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder, com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder>
+        getInitializationActionsFieldBuilder() {
+      if (initializationActionsBuilder_ == null) {
+        initializationActionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.NodeInitializationAction, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder, com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder>(
+                initializationActions_,
+                ((bitField0_ & 0x00000080) == 0x00000080),
+                getParentForChildren(),
+                isClean());
+        initializationActions_ = null;
+      }
+      return initializationActionsBuilder_;
+    }
+    @java.lang.Override
+    public final Builder setUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFieldsProto3(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ClusterConfig)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterConfig)
+  private static final com.google.cloud.dataproc.v1beta2.ClusterConfig DEFAULT_INSTANCE;
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ClusterConfig();
+  }
+
+  public static com.google.cloud.dataproc.v1beta2.ClusterConfig getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<ClusterConfig>
+      PARSER = new com.google.protobuf.AbstractParser<ClusterConfig>() {
+    @java.lang.Override
+    public ClusterConfig parsePartialFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return new ClusterConfig(input, extensionRegistry);
+    }
+  };
+
+  public static com.google.protobuf.Parser<ClusterConfig> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<ClusterConfig> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.dataproc.v1beta2.ClusterConfig getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+
+}
+
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java
new file mode 100644
index 000000000000..f43a86d5cf11
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java
@@ -0,0 +1,288 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/clusters.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+public interface ClusterConfigOrBuilder extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ClusterConfig)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   *
+   * Optional. A Cloud Storage staging bucket used for sharing generated
+   * SSH keys and config. If you do not specify a staging bucket, Cloud
+   * Dataproc will determine an appropriate Cloud Storage location (US,
+   * ASIA, or EU) for your cluster's staging bucket according to the Google
+   * Compute Engine zone where your cluster is deployed, and then it will create
+   * and manage this project-level, per-location bucket for you.
+   * 
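+   * For example (illustrative only; the bucket name below is hypothetical),
+   * a request might set:
+   *     config_bucket: "my-dataproc-staging-bucket"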
+ * + * string config_bucket = 1; + */ + java.lang.String getConfigBucket(); + /** + *
+   * Optional. A Cloud Storage staging bucket used for sharing generated
+   * SSH keys and config. If you do not specify a staging bucket, Cloud
+   * Dataproc will determine an appropriate Cloud Storage location (US,
+   * ASIA, or EU) for your cluster's staging bucket according to the Google
+   * Compute Engine zone where your cluster is deployed, and then it will create
+   * and manage this project-level, per-location bucket for you.
+   * 
+ * + * string config_bucket = 1; + */ + com.google.protobuf.ByteString + getConfigBucketBytes(); + + /** + *
+   * Required. The shared Compute Engine config settings for
+   * all instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + boolean hasGceClusterConfig(); + /** + *
+   * Required. The shared Compute Engine config settings for
+   * all instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + com.google.cloud.dataproc.v1beta2.GceClusterConfig getGceClusterConfig(); + /** + *
+   * Required. The shared Compute Engine config settings for
+   * all instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.GceClusterConfig gce_cluster_config = 8; + */ + com.google.cloud.dataproc.v1beta2.GceClusterConfigOrBuilder getGceClusterConfigOrBuilder(); + + /** + *
+   * Optional. The Compute Engine config settings for
+   * the master instance in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + boolean hasMasterConfig(); + /** + *
+   * Optional. The Compute Engine config settings for
+   * the master instance in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getMasterConfig(); + /** + *
+   * Optional. The Compute Engine config settings for
+   * the master instance in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig master_config = 9; + */ + com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getMasterConfigOrBuilder(); + + /** + *
+   * Optional. The Compute Engine config settings for
+   * worker instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + boolean hasWorkerConfig(); + /** + *
+   * Optional. The Compute Engine config settings for
+   * worker instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getWorkerConfig(); + /** + *
+   * Optional. The Compute Engine config settings for
+   * worker instances in a cluster.
+   * 
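+   * As an illustrative sketch (assuming the usual generated setters; the
+   * machine types and instance counts below are examples only), the worker
+   * group is typically set alongside the master group when assembling a config:
+   *     ClusterConfig config = ClusterConfig.newBuilder()
+   *         .setMasterConfig(InstanceGroupConfig.newBuilder()
+   *             .setNumInstances(1)
+   *             .setMachineTypeUri("n1-standard-4"))
+   *         .setWorkerConfig(InstanceGroupConfig.newBuilder()
+   *             .setNumInstances(2)
+   *             .setMachineTypeUri("n1-standard-4"))
+   *         .build();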
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig worker_config = 10; + */ + com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getWorkerConfigOrBuilder(); + + /** + *
+   * Optional. The Compute Engine config settings for
+   * additional worker instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + boolean hasSecondaryWorkerConfig(); + /** + *
+   * Optional. The Compute Engine config settings for
+   * additional worker instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getSecondaryWorkerConfig(); + /** + *
+   * Optional. The Compute Engine config settings for
+   * additional worker instances in a cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.InstanceGroupConfig secondary_worker_config = 12; + */ + com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder getSecondaryWorkerConfigOrBuilder(); + + /** + *
+   * Optional. The config settings for software inside the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + boolean hasSoftwareConfig(); + /** + *
+   * Optional. The config settings for software inside the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + com.google.cloud.dataproc.v1beta2.SoftwareConfig getSoftwareConfig(); + /** + *
+   * Optional. The config settings for software inside the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SoftwareConfig software_config = 13; + */ + com.google.cloud.dataproc.v1beta2.SoftwareConfigOrBuilder getSoftwareConfigOrBuilder(); + + /** + *
+   * Optional. The config settings for the cluster's auto-delete schedule.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + boolean hasLifecycleConfig(); + /** + *
+   * Optional. The config settings for the cluster's auto-delete schedule.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + com.google.cloud.dataproc.v1beta2.LifecycleConfig getLifecycleConfig(); + /** + *
+   * Optional. The config settings for the cluster's auto-delete schedule.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LifecycleConfig lifecycle_config = 14; + */ + com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder getLifecycleConfigOrBuilder(); + + /** + *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
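+   * A minimal read-side sketch (illustrative only; it assumes a ClusterConfig
+   * named config and a getExecutableFile accessor on the action):
+   *     for (NodeInitializationAction action : config.getInitializationActionsList()) {
+   *       System.out.println(action.getExecutableFile());
+   *     }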
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  java.util.List<com.google.cloud.dataproc.v1beta2.NodeInitializationAction>
+      getInitializationActionsList();
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  com.google.cloud.dataproc.v1beta2.NodeInitializationAction getInitializationActions(int index);
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  int getInitializationActionsCount();
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+   *
+   * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11;
+   */
+  java.util.List<? extends com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder>
+      getInitializationActionsOrBuilderList();
+  /**
+   *
+   * Optional. Commands to execute on each node after config is
+   * completed. By default, executables are run on master and all worker nodes.
+   * You can test a node's <code>role</code> metadata to run an executable on
+   * a master or worker node, as shown below using `curl` (you can also use `wget`):
+   *     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+   *     if [[ "${ROLE}" == 'Master' ]]; then
+   *       ... master specific actions ...
+   *     else
+   *       ... worker specific actions ...
+   *     fi
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.NodeInitializationAction initialization_actions = 11; + */ + com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder getInitializationActionsOrBuilder( + int index); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetrics.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetrics.java new file mode 100644 index 000000000000..c5b508a058f4 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetrics.java @@ -0,0 +1,1034 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Contains cluster daemon metrics, such as HDFS and YARN stats.
+ * **Beta Feature**: This report is available for testing purposes only. It may
+ * be changed before final release.
+ * 
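+ * A minimal read-side sketch (illustrative only; the metric keys are examples,
+ * not guaranteed names, and cluster is assumed to be a fetched Cluster):
+ *     ClusterMetrics metrics = cluster.getMetrics();
+ *     long capacityUsed = metrics.getHdfsMetricsOrDefault("dfs-capacity-used", 0L);
+ *     for (java.util.Map.Entry<String, Long> e : metrics.getYarnMetricsMap().entrySet()) {
+ *       System.out.println(e.getKey() + " = " + e.getValue());
+ *     }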
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterMetrics} + */ +public final class ClusterMetrics extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ClusterMetrics) + ClusterMetricsOrBuilder { +private static final long serialVersionUID = 0L; + // Use ClusterMetrics.newBuilder() to construct. + private ClusterMetrics(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClusterMetrics() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClusterMetrics( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + hdfsMetrics_ = com.google.protobuf.MapField.newMapField( + HdfsMetricsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry + hdfsMetrics__ = input.readMessage( + HdfsMetricsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + hdfsMetrics_.getMutableMap().put( + hdfsMetrics__.getKey(), hdfsMetrics__.getValue()); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + yarnMetrics_ = com.google.protobuf.MapField.newMapField( + YarnMetricsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000002; + } + com.google.protobuf.MapEntry + yarnMetrics__ = input.readMessage( + YarnMetricsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + yarnMetrics_.getMutableMap().put( + yarnMetrics__.getKey(), yarnMetrics__.getValue()); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 1: + return internalGetHdfsMetrics(); + case 2: + return internalGetYarnMetrics(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
com.google.cloud.dataproc.v1beta2.ClusterMetrics.class, com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder.class); + } + + public static final int HDFS_METRICS_FIELD_NUMBER = 1; + private static final class HdfsMetricsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.Long> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_HdfsMetricsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.INT64, + 0L); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.Long> hdfsMetrics_; + private com.google.protobuf.MapField + internalGetHdfsMetrics() { + if (hdfsMetrics_ == null) { + return com.google.protobuf.MapField.emptyMapField( + HdfsMetricsDefaultEntryHolder.defaultEntry); + } + return hdfsMetrics_; + } + + public int getHdfsMetricsCount() { + return internalGetHdfsMetrics().getMap().size(); + } + /** + *
+   * The HDFS metrics.
+   * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public boolean containsHdfsMetrics( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetHdfsMetrics().getMap().containsKey(key); + } + /** + * Use {@link #getHdfsMetricsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getHdfsMetrics() { + return getHdfsMetricsMap(); + } + /** + *
+   * The HDFS metrics.
+   * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public java.util.Map getHdfsMetricsMap() { + return internalGetHdfsMetrics().getMap(); + } + /** + *
+   * The HDFS metrics.
+   * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public long getHdfsMetricsOrDefault( + java.lang.String key, + long defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetHdfsMetrics().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * The HDFS metrics.
+   * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public long getHdfsMetricsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetHdfsMetrics().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int YARN_METRICS_FIELD_NUMBER = 2; + private static final class YarnMetricsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.Long> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_YarnMetricsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.INT64, + 0L); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.Long> yarnMetrics_; + private com.google.protobuf.MapField + internalGetYarnMetrics() { + if (yarnMetrics_ == null) { + return com.google.protobuf.MapField.emptyMapField( + YarnMetricsDefaultEntryHolder.defaultEntry); + } + return yarnMetrics_; + } + + public int getYarnMetricsCount() { + return internalGetYarnMetrics().getMap().size(); + } + /** + *
+   * The YARN metrics.
+   * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public boolean containsYarnMetrics( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetYarnMetrics().getMap().containsKey(key); + } + /** + * Use {@link #getYarnMetricsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getYarnMetrics() { + return getYarnMetricsMap(); + } + /** + *
+   * The YARN metrics.
+   * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public java.util.Map getYarnMetricsMap() { + return internalGetYarnMetrics().getMap(); + } + /** + *
+   * The YARN metrics.
+   * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public long getYarnMetricsOrDefault( + java.lang.String key, + long defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetYarnMetrics().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * The YARN metrics.
+   * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public long getYarnMetricsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetYarnMetrics().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetHdfsMetrics(), + HdfsMetricsDefaultEntryHolder.defaultEntry, + 1); + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetYarnMetrics(), + YarnMetricsDefaultEntryHolder.defaultEntry, + 2); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (java.util.Map.Entry entry + : internalGetHdfsMetrics().getMap().entrySet()) { + com.google.protobuf.MapEntry + hdfsMetrics__ = HdfsMetricsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, hdfsMetrics__); + } + for (java.util.Map.Entry entry + : internalGetYarnMetrics().getMap().entrySet()) { + com.google.protobuf.MapEntry + yarnMetrics__ = YarnMetricsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, yarnMetrics__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ClusterMetrics)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ClusterMetrics other = (com.google.cloud.dataproc.v1beta2.ClusterMetrics) obj; + + boolean result = true; + result = result && internalGetHdfsMetrics().equals( + other.internalGetHdfsMetrics()); + result = result && internalGetYarnMetrics().equals( + other.internalGetYarnMetrics()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (!internalGetHdfsMetrics().getMap().isEmpty()) { + hash = (37 * hash) + HDFS_METRICS_FIELD_NUMBER; + hash = (53 * hash) + internalGetHdfsMetrics().hashCode(); + } + if (!internalGetYarnMetrics().getMap().isEmpty()) { + hash = (37 * hash) + YARN_METRICS_FIELD_NUMBER; + hash = (53 * hash) + internalGetYarnMetrics().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics 
parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ClusterMetrics prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Contains cluster daemon metrics, such as HDFS and YARN stats.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterMetrics} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ClusterMetrics) + com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 1: + return internalGetHdfsMetrics(); + case 2: + return internalGetYarnMetrics(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 1: + return internalGetMutableHdfsMetrics(); + case 2: + return internalGetMutableYarnMetrics(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterMetrics.class, com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ClusterMetrics.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + internalGetMutableHdfsMetrics().clear(); + internalGetMutableYarnMetrics().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterMetrics getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ClusterMetrics.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterMetrics build() { + com.google.cloud.dataproc.v1beta2.ClusterMetrics result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterMetrics buildPartial() { + com.google.cloud.dataproc.v1beta2.ClusterMetrics result = new com.google.cloud.dataproc.v1beta2.ClusterMetrics(this); + int from_bitField0_ = bitField0_; + result.hdfsMetrics_ = internalGetHdfsMetrics(); + result.hdfsMetrics_.makeImmutable(); + result.yarnMetrics_ = internalGetYarnMetrics(); + result.yarnMetrics_.makeImmutable(); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ClusterMetrics) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterMetrics)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ClusterMetrics other) { + if (other == com.google.cloud.dataproc.v1beta2.ClusterMetrics.getDefaultInstance()) return this; + internalGetMutableHdfsMetrics().mergeFrom( + other.internalGetHdfsMetrics()); + internalGetMutableYarnMetrics().mergeFrom( + other.internalGetYarnMetrics()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ClusterMetrics parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ClusterMetrics) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private com.google.protobuf.MapField< + java.lang.String, java.lang.Long> hdfsMetrics_; + private com.google.protobuf.MapField + internalGetHdfsMetrics() { + if (hdfsMetrics_ == null) { + return com.google.protobuf.MapField.emptyMapField( + HdfsMetricsDefaultEntryHolder.defaultEntry); + } + return hdfsMetrics_; + } + private com.google.protobuf.MapField + internalGetMutableHdfsMetrics() { + onChanged();; + if (hdfsMetrics_ == null) { + hdfsMetrics_ = com.google.protobuf.MapField.newMapField( + HdfsMetricsDefaultEntryHolder.defaultEntry); + } + if (!hdfsMetrics_.isMutable()) { + hdfsMetrics_ = hdfsMetrics_.copy(); + } + return hdfsMetrics_; + } + + public int getHdfsMetricsCount() { + return internalGetHdfsMetrics().getMap().size(); + } + /** + *
+     * The HDFS metrics.
+     * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public boolean containsHdfsMetrics( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetHdfsMetrics().getMap().containsKey(key); + } + /** + * Use {@link #getHdfsMetricsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getHdfsMetrics() { + return getHdfsMetricsMap(); + } + /** + *
+     * The HDFS metrics.
+     * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public java.util.Map getHdfsMetricsMap() { + return internalGetHdfsMetrics().getMap(); + } + /** + *
+     * The HDFS metrics.
+     * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public long getHdfsMetricsOrDefault( + java.lang.String key, + long defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetHdfsMetrics().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * The HDFS metrics.
+     * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public long getHdfsMetricsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetHdfsMetrics().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearHdfsMetrics() { + internalGetMutableHdfsMetrics().getMutableMap() + .clear(); + return this; + } + /** + *
+     * The HDFS metrics.
+     * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public Builder removeHdfsMetrics( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableHdfsMetrics().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableHdfsMetrics() { + return internalGetMutableHdfsMetrics().getMutableMap(); + } + /** + *
+     * The HDFS metrics.
+     * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + public Builder putHdfsMetrics( + java.lang.String key, + long value) { + if (key == null) { throw new java.lang.NullPointerException(); } + + internalGetMutableHdfsMetrics().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * The HDFS metrics.
+     * 
+ * + * map<string, int64> hdfs_metrics = 1; + */ + + public Builder putAllHdfsMetrics( + java.util.Map values) { + internalGetMutableHdfsMetrics().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.Long> yarnMetrics_; + private com.google.protobuf.MapField + internalGetYarnMetrics() { + if (yarnMetrics_ == null) { + return com.google.protobuf.MapField.emptyMapField( + YarnMetricsDefaultEntryHolder.defaultEntry); + } + return yarnMetrics_; + } + private com.google.protobuf.MapField + internalGetMutableYarnMetrics() { + onChanged();; + if (yarnMetrics_ == null) { + yarnMetrics_ = com.google.protobuf.MapField.newMapField( + YarnMetricsDefaultEntryHolder.defaultEntry); + } + if (!yarnMetrics_.isMutable()) { + yarnMetrics_ = yarnMetrics_.copy(); + } + return yarnMetrics_; + } + + public int getYarnMetricsCount() { + return internalGetYarnMetrics().getMap().size(); + } + /** + *
+     * The YARN metrics.
+     * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public boolean containsYarnMetrics( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetYarnMetrics().getMap().containsKey(key); + } + /** + * Use {@link #getYarnMetricsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getYarnMetrics() { + return getYarnMetricsMap(); + } + /** + *
+     * The YARN metrics.
+     * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public java.util.Map getYarnMetricsMap() { + return internalGetYarnMetrics().getMap(); + } + /** + *
+     * The YARN metrics.
+     * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public long getYarnMetricsOrDefault( + java.lang.String key, + long defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetYarnMetrics().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * The YARN metrics.
+     * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public long getYarnMetricsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetYarnMetrics().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearYarnMetrics() { + internalGetMutableYarnMetrics().getMutableMap() + .clear(); + return this; + } + /** + *
+     * The YARN metrics.
+     * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public Builder removeYarnMetrics( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableYarnMetrics().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableYarnMetrics() { + return internalGetMutableYarnMetrics().getMutableMap(); + } + /** + *
+     * The YARN metrics.
+     * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + public Builder putYarnMetrics( + java.lang.String key, + long value) { + if (key == null) { throw new java.lang.NullPointerException(); } + + internalGetMutableYarnMetrics().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * The YARN metrics.
+     * 
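+     * A write-side sketch (illustrative only; the keys and values are made up):
+     *     java.util.Map<String, Long> yarn = new java.util.HashMap<>();
+     *     yarn.put("yarn-containers-allocated", 4L);
+     *     yarn.put("yarn-apps-running", 1L);
+     *     ClusterMetrics metrics = ClusterMetrics.newBuilder()
+     *         .putAllYarnMetrics(yarn)
+     *         .build();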
+ * + * map<string, int64> yarn_metrics = 2; + */ + + public Builder putAllYarnMetrics( + java.util.Map values) { + internalGetMutableYarnMetrics().getMutableMap() + .putAll(values); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ClusterMetrics) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterMetrics) + private static final com.google.cloud.dataproc.v1beta2.ClusterMetrics DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ClusterMetrics(); + } + + public static com.google.cloud.dataproc.v1beta2.ClusterMetrics getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ClusterMetrics parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClusterMetrics(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterMetrics getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetricsOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetricsOrBuilder.java new file mode 100644 index 000000000000..2d53dfe810a7 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetricsOrBuilder.java @@ -0,0 +1,117 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ClusterMetricsOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ClusterMetrics) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The HDFS metrics.
+   * 
+   *
+   * map<string, int64> hdfs_metrics = 1;
+   */
+  int getHdfsMetricsCount();
+  /**
+   *
+   * The HDFS metrics.
+   * 
+   *
+   * map<string, int64> hdfs_metrics = 1;
+   */
+  boolean containsHdfsMetrics(
+      java.lang.String key);
+  /**
+   * Use {@link #getHdfsMetricsMap()} instead.
+   */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.Long>
+      getHdfsMetrics();
+  /**
+   *
+   * The HDFS metrics.
+   * 
+   *
+   * map<string, int64> hdfs_metrics = 1;
+   */
+  java.util.Map<java.lang.String, java.lang.Long>
+      getHdfsMetricsMap();
+  /**
+   *
+   * The HDFS metrics.
+   * 
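+   * Unlike getHdfsMetricsOrThrow, this accessor returns the supplied default
+   * rather than throwing when the key is absent, e.g. (key name illustrative):
+   *     long used = metrics.getHdfsMetricsOrDefault("dfs-capacity-used", 0L);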
+   *
+   * map<string, int64> hdfs_metrics = 1;
+   */
+
+  long getHdfsMetricsOrDefault(
+      java.lang.String key,
+      long defaultValue);
+  /**
+   *
+   * The HDFS metrics.
+   * 
+   *
+   * map<string, int64> hdfs_metrics = 1;
+   */
+
+  long getHdfsMetricsOrThrow(
+      java.lang.String key);
+
+  /**
+   *
+   * The YARN metrics.
+   * 
+   *
+   * map<string, int64> yarn_metrics = 2;
+   */
+  int getYarnMetricsCount();
+  /**
+   *
+   * The YARN metrics.
+   * 
+   *
+   * map<string, int64> yarn_metrics = 2;
+   */
+  boolean containsYarnMetrics(
+      java.lang.String key);
+  /**
+   * Use {@link #getYarnMetricsMap()} instead.
+   */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.Long>
+      getYarnMetrics();
+  /**
+   *
+   * The YARN metrics.
+   * 
+   *
+   * map<string, int64> yarn_metrics = 2;
+   */
+  java.util.Map<java.lang.String, java.lang.Long>
+      getYarnMetricsMap();
+  /**
+   *
+   * The YARN metrics.
+   * 
+   *
+   * map<string, int64> yarn_metrics = 2;
+   */
+
+  long getYarnMetricsOrDefault(
+      java.lang.String key,
+      long defaultValue);
+  /**
+   *
+   * The YARN metrics.
+   * 
+ * + * map<string, int64> yarn_metrics = 2; + */ + + long getYarnMetricsOrThrow( + java.lang.String key); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperation.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperation.java new file mode 100644 index 000000000000..ef617f02cd08 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperation.java @@ -0,0 +1,807 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * The cluster operation triggered by a workflow.
+ * 
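+ * A minimal status-check sketch (illustrative only; operation is assumed to be
+ * a ClusterOperation obtained from workflow metadata):
+ *     if (operation.getDone() && !operation.getError().isEmpty()) {
+ *       System.err.println("Operation " + operation.getOperationId()
+ *           + " failed: " + operation.getError());
+ *     }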
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterOperation} + */ +public final class ClusterOperation extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ClusterOperation) + ClusterOperationOrBuilder { +private static final long serialVersionUID = 0L; + // Use ClusterOperation.newBuilder() to construct. + private ClusterOperation(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClusterOperation() { + operationId_ = ""; + error_ = ""; + done_ = false; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClusterOperation( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + operationId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + error_ = s; + break; + } + case 24: { + + done_ = input.readBool(); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterOperation.class, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder.class); + } + + public static final int OPERATION_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object operationId_; + /** + *
+   * Output only. The id of the cluster operation.
+   * 
+ * + * string operation_id = 1; + */ + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } + } + /** + *
+   * Output only. The id of the cluster operation.
+   * 
+ * + * string operation_id = 1; + */ + public com.google.protobuf.ByteString + getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ERROR_FIELD_NUMBER = 2; + private volatile java.lang.Object error_; + /** + *
+   * Output only. Error, if operation failed.
+   * 
+ * + * string error = 2; + */ + public java.lang.String getError() { + java.lang.Object ref = error_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + error_ = s; + return s; + } + } + /** + *
+   * Output only. Error, if operation failed.
+   * 
+ * + * string error = 2; + */ + public com.google.protobuf.ByteString + getErrorBytes() { + java.lang.Object ref = error_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + error_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DONE_FIELD_NUMBER = 3; + private boolean done_; + /** + *
+   * Output only. Indicates the operation is done.
+   * 
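+   *
+   * <p>Hand-written note, not protoc output: a finished operation may still
+   * carry an error message, so {@code getDone()} is typically read together
+   * with {@code getError()} ({@code op} below is a hypothetical instance):
+   * <pre>
+   * ClusterOperation op = ClusterOperation.getDefaultInstance();
+   * if (op.getDone() &amp;&amp; !op.getError().isEmpty()) {
+   *   // finished, but with a reported error
+   * }
+   * </pre>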
+ * + * bool done = 3; + */ + public boolean getDone() { + return done_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getOperationIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, operationId_); + } + if (!getErrorBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, error_); + } + if (done_ != false) { + output.writeBool(3, done_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getOperationIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, operationId_); + } + if (!getErrorBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, error_); + } + if (done_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, done_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ClusterOperation)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ClusterOperation other = (com.google.cloud.dataproc.v1beta2.ClusterOperation) obj; + + boolean result = true; + result = result && getOperationId() + .equals(other.getOperationId()); + result = result && getError() + .equals(other.getError()); + result = result && (getDone() + == other.getDone()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OPERATION_ID_FIELD_NUMBER; + hash = (53 * hash) + getOperationId().hashCode(); + hash = (37 * hash) + ERROR_FIELD_NUMBER; + hash = (53 * hash) + getError().hashCode(); + hash = (37 * hash) + DONE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getDone()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperation parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ClusterOperation prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The cluster operation triggered by a workflow.
+   * 
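+   *
+   * <p>Hand-written usage sketch (field values are illustrative only):
+   * <pre>
+   * ClusterOperation op = ClusterOperation.newBuilder()
+   *     .setOperationId("operation-123")
+   *     .setDone(true)
+   *     .build();
+   * </pre>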
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterOperation} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ClusterOperation) + com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterOperation.class, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ClusterOperation.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + operationId_ = ""; + + error_ = ""; + + done_ = false; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperation getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperation build() { + com.google.cloud.dataproc.v1beta2.ClusterOperation result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperation buildPartial() { + com.google.cloud.dataproc.v1beta2.ClusterOperation result = new com.google.cloud.dataproc.v1beta2.ClusterOperation(this); + result.operationId_ = operationId_; + result.error_ = error_; + result.done_ = done_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ClusterOperation) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterOperation)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ClusterOperation other) { + if (other == com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance()) return this; + if (!other.getOperationId().isEmpty()) { + operationId_ = other.operationId_; + onChanged(); + } + if (!other.getError().isEmpty()) { + error_ = other.error_; + onChanged(); + } + if (other.getDone() != false) { + setDone(other.getDone()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ClusterOperation parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ClusterOperation) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object operationId_ = ""; + /** + *
+     * Output only. The id of the cluster operation.
+     * 
+ * + * string operation_id = 1; + */ + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The id of the cluster operation.
+     * 
+ * + * string operation_id = 1; + */ + public com.google.protobuf.ByteString + getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The id of the cluster operation.
+     * 
+ * + * string operation_id = 1; + */ + public Builder setOperationId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + operationId_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The id of the cluster operation.
+     * 
+ * + * string operation_id = 1; + */ + public Builder clearOperationId() { + + operationId_ = getDefaultInstance().getOperationId(); + onChanged(); + return this; + } + /** + *
+     * Output only. The id of the cluster operation.
+     * 
+ * + * string operation_id = 1; + */ + public Builder setOperationIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + operationId_ = value; + onChanged(); + return this; + } + + private java.lang.Object error_ = ""; + /** + *
+     * Output only. Error, if the operation failed.
+     * 
+ * + * string error = 2; + */ + public java.lang.String getError() { + java.lang.Object ref = error_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + error_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. Error, if the operation failed.
+     * 
+ * + * string error = 2; + */ + public com.google.protobuf.ByteString + getErrorBytes() { + java.lang.Object ref = error_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + error_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. Error, if the operation failed.
+     * 
+ * + * string error = 2; + */ + public Builder setError( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + error_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Error, if the operation failed.
+     * 
+ * + * string error = 2; + */ + public Builder clearError() { + + error_ = getDefaultInstance().getError(); + onChanged(); + return this; + } + /** + *
+     * Output only. Error, if the operation failed.
+     * 
+ * + * string error = 2; + */ + public Builder setErrorBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + error_ = value; + onChanged(); + return this; + } + + private boolean done_ ; + /** + *
+     * Output only. Indicates the operation is done.
+     * 
+ * + * bool done = 3; + */ + public boolean getDone() { + return done_; + } + /** + *
+     * Output only. Indicates the operation is done.
+     * 
+ * + * bool done = 3; + */ + public Builder setDone(boolean value) { + + done_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Indicates the operation is done.
+     * 
+ * + * bool done = 3; + */ + public Builder clearDone() { + + done_ = false; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ClusterOperation) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterOperation) + private static final com.google.cloud.dataproc.v1beta2.ClusterOperation DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ClusterOperation(); + } + + public static com.google.cloud.dataproc.v1beta2.ClusterOperation getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser<ClusterOperation> + PARSER = new com.google.protobuf.AbstractParser<ClusterOperation>() { + @java.lang.Override + public ClusterOperation parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClusterOperation(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser<ClusterOperation> parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser<ClusterOperation> getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperation getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadata.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadata.java new file mode 100644 index 000000000000..281fa5617115 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadata.java @@ -0,0 +1,2250 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/operations.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Metadata describing the operation.
+ * 
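+ *
+ * <p>Hand-written sketch, not part of the protoc output: round-tripping a
+ * message through its wire form with the generated {@code parseFrom}
+ * (values are illustrative; {@code parseFrom} throws
+ * InvalidProtocolBufferException on malformed input):
+ * <pre>
+ * byte[] data = ClusterOperationMetadata.newBuilder()
+ *     .setClusterName("example-cluster")
+ *     .build()
+ *     .toByteArray();
+ * ClusterOperationMetadata copy = ClusterOperationMetadata.parseFrom(data);
+ * </pre>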
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterOperationMetadata} + */ +public final class ClusterOperationMetadata extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ClusterOperationMetadata) + ClusterOperationMetadataOrBuilder { +private static final long serialVersionUID = 0L; + // Use ClusterOperationMetadata.newBuilder() to construct. + private ClusterOperationMetadata(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClusterOperationMetadata() { + clusterName_ = ""; + clusterUuid_ = ""; + statusHistory_ = java.util.Collections.emptyList(); + operationType_ = ""; + description_ = ""; + warnings_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClusterOperationMetadata( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 58: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 66: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterUuid_ = s; + break; + } + case 74: { + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder subBuilder = null; + if (status_ != null) { + subBuilder = status_.toBuilder(); + } + status_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(status_); + status_ = subBuilder.buildPartial(); + } + + break; + } + case 82: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + statusHistory_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + statusHistory_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.parser(), extensionRegistry)); + break; + } + case 90: { + java.lang.String s = input.readStringRequireUtf8(); + + operationType_ = s; + break; + } + case 98: { + java.lang.String s = input.readStringRequireUtf8(); + + description_ = s; + break; + } + case 106: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000040; + } + com.google.protobuf.MapEntry + labels__ = input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + labels_.getMutableMap().put( + labels__.getKey(), labels__.getValue()); + break; + } + case 114: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + warnings_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000080; + } + warnings_.add(s); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + statusHistory_ = java.util.Collections.unmodifiableList(statusHistory_); + } + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + warnings_ = warnings_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 13: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.class, com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.Builder.class); + } + + private int bitField0_; + public static final int CLUSTER_NAME_FIELD_NUMBER = 7; + private volatile java.lang.Object clusterName_; + /** + *
+   * Output only. Name of the cluster for the operation.
+   * 
+ * + * string cluster_name = 7; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Output only. Name of the cluster for the operation.
+   * 
+ * + * string cluster_name = 7; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_UUID_FIELD_NUMBER = 8; + private volatile java.lang.Object clusterUuid_; + /** + *
+   * Output only. Cluster UUID for the operation.
+   * 
+ * + * string cluster_uuid = 8; + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } + } + /** + *
+   * Output only. Cluster UUID for the operation.
+   * 
+ * + * string cluster_uuid = 8; + */ + public com.google.protobuf.ByteString + getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATUS_FIELD_NUMBER = 9; + private com.google.cloud.dataproc.v1beta2.ClusterOperationStatus status_; + /** + *
+   * Output only. Current operation status.
+   * 
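+   *
+   * <p>Hand-written note: {@code getStatus()} never returns null; it falls
+   * back to the default instance, so callers that must distinguish an unset
+   * field should check {@code hasStatus()} first ({@code metadata} is a
+   * hypothetical instance):
+   * <pre>
+   * if (metadata.hasStatus()) {
+   *   ClusterOperationStatus current = metadata.getStatus();
+   * }
+   * </pre>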
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public boolean hasStatus() { + return status_ != null; + } + /** + *
+   * Output only. Current operation status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getStatus() { + return status_ == null ? com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDefaultInstance() : status_; + } + /** + *
+   * Output only. Current operation status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder getStatusOrBuilder() { + return getStatus(); + } + + public static final int STATUS_HISTORY_FIELD_NUMBER = 10; + private java.util.List<com.google.cloud.dataproc.v1beta2.ClusterOperationStatus> statusHistory_; + /** + *
+   * Output only. The previous operation statuses.
+   * 
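+   *
+   * <p>Hand-written sketch ({@code metadata} is a hypothetical instance):
+   * <pre>
+   * for (ClusterOperationStatus past : metadata.getStatusHistoryList()) {
+   *   // inspect each prior status in order of appearance
+   * }
+   * </pre>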
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public java.util.List<com.google.cloud.dataproc.v1beta2.ClusterOperationStatus> getStatusHistoryList() { + return statusHistory_; + } + /** + *
+   * Output only. The previous operation statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public java.util.List<? extends com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder> + getStatusHistoryOrBuilderList() { + return statusHistory_; + } + /** + *
+   * Output only. The previous operation statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public int getStatusHistoryCount() { + return statusHistory_.size(); + } + /** + *
+   * Output only. The previous operation statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getStatusHistory(int index) { + return statusHistory_.get(index); + } + /** + *
+   * Output only. The previous operation statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder getStatusHistoryOrBuilder( + int index) { + return statusHistory_.get(index); + } + + public static final int OPERATION_TYPE_FIELD_NUMBER = 11; + private volatile java.lang.Object operationType_; + /** + *
+   * Output only. The operation type.
+   * 
+ * + * string operation_type = 11; + */ + public java.lang.String getOperationType() { + java.lang.Object ref = operationType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationType_ = s; + return s; + } + } + /** + *
+   * Output only. The operation type.
+   * 
+ * + * string operation_type = 11; + */ + public com.google.protobuf.ByteString + getOperationTypeBytes() { + java.lang.Object ref = operationType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + operationType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DESCRIPTION_FIELD_NUMBER = 12; + private volatile java.lang.Object description_; + /** + *
+   * Output only. Short description of the operation.
+   * 
+ * + * string description = 12; + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + /** + *
+   * Output only. Short description of the operation.
+   * 
+ * + * string description = 12; + */ + public com.google.protobuf.ByteString + getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LABELS_FIELD_NUMBER = 13; + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .<java.lang.String, java.lang.String>newDefaultInstance( + com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField<java.lang.String, java.lang.String> + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+   * Output only. Labels associated with the operation.
+   * 
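+   *
+   * <p>Hand-written sketch of a lookup with a fallback (the key is
+   * illustrative; {@code metadata} is a hypothetical instance):
+   * <pre>
+   * String env = metadata.getLabelsOrDefault("env", "unknown");
+   * </pre>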
+ * + * map<string, string> labels = 13; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map<java.lang.String, java.lang.String> getLabels() { + return getLabelsMap(); + } + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + + public java.util.Map<java.lang.String, java.lang.String> getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map<java.lang.String, java.lang.String> map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map<java.lang.String, java.lang.String> map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int WARNINGS_FIELD_NUMBER = 14; + private com.google.protobuf.LazyStringList warnings_; + /** + *
+   * Output only. Warnings encountered during operation execution.
+   * 
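+   *
+   * <p>Hand-written sketch ({@code metadata} is a hypothetical instance):
+   * <pre>
+   * for (String warning : metadata.getWarningsList()) {
+   *   System.err.println(warning);  // surface non-fatal issues
+   * }
+   * </pre>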
+ * + * repeated string warnings = 14; + */ + public com.google.protobuf.ProtocolStringList + getWarningsList() { + return warnings_; + } + /** + *
+   * Output only. Warnings encountered during operation execution.
+   * 
+ * + * repeated string warnings = 14; + */ + public int getWarningsCount() { + return warnings_.size(); + } + /** + *
+   * Output only. Warnings encountered during operation execution.
+   * 
+ * + * repeated string warnings = 14; + */ + public java.lang.String getWarnings(int index) { + return warnings_.get(index); + } + /** + *
+   * Output only. Warnings encountered during operation execution.
+   * 
+ * + * repeated string warnings = 14; + */ + public com.google.protobuf.ByteString + getWarningsBytes(int index) { + return warnings_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 8, clusterUuid_); + } + if (status_ != null) { + output.writeMessage(9, getStatus()); + } + for (int i = 0; i < statusHistory_.size(); i++) { + output.writeMessage(10, statusHistory_.get(i)); + } + if (!getOperationTypeBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 11, operationType_); + } + if (!getDescriptionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 12, description_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetLabels(), + LabelsDefaultEntryHolder.defaultEntry, + 13); + for (int i = 0; i < warnings_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 14, warnings_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, clusterUuid_); + } + if (status_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, getStatus()); + } + for (int i = 0; i < statusHistory_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, statusHistory_.get(i)); + } + if (!getOperationTypeBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(11, operationType_); + } + if (!getDescriptionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(12, description_); + } + for (java.util.Map.Entry entry + : internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry + labels__ = LabelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(13, labels__); + } + { + int dataSize = 0; + for (int i = 0; i < warnings_.size(); i++) { + dataSize += computeStringSizeNoTag(warnings_.getRaw(i)); + } + size += dataSize; + size += 1 * getWarningsList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata other = (com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata) obj; + + boolean result = true; + result = result && 
getClusterName() + .equals(other.getClusterName()); + result = result && getClusterUuid() + .equals(other.getClusterUuid()); + result = result && (hasStatus() == other.hasStatus()); + if (hasStatus()) { + result = result && getStatus() + .equals(other.getStatus()); + } + result = result && getStatusHistoryList() + .equals(other.getStatusHistoryList()); + result = result && getOperationType() + .equals(other.getOperationType()); + result = result && getDescription() + .equals(other.getDescription()); + result = result && internalGetLabels().equals( + other.internalGetLabels()); + result = result && getWarningsList() + .equals(other.getWarningsList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; + hash = (53 * hash) + getClusterUuid().hashCode(); + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (getStatusHistoryCount() > 0) { + hash = (37 * hash) + STATUS_HISTORY_FIELD_NUMBER; + hash = (53 * hash) + getStatusHistoryList().hashCode(); + } + hash = (37 * hash) + OPERATION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getOperationType().hashCode(); + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (getWarningsCount() > 0) { + hash = (37 * hash) + WARNINGS_FIELD_NUMBER; + hash = (53 * hash) + getWarningsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Metadata describing the operation.
+   * 
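+   *
+   * <p>Hand-written usage sketch (illustrative values only, not protoc
+   * output):
+   * <pre>
+   * ClusterOperationMetadata metadata = ClusterOperationMetadata.newBuilder()
+   *     .setClusterName("example-cluster")
+   *     .setClusterUuid("00000000-0000-0000-0000-000000000000")
+   *     .putLabels("env", "test")
+   *     .addWarnings("example warning")
+   *     .build();
+   * </pre>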
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterOperationMetadata} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ClusterOperationMetadata) + com.google.cloud.dataproc.v1beta2.ClusterOperationMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 13: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 13: + return internalGetMutableLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.class, com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getStatusHistoryFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + clusterName_ = ""; + + clusterUuid_ = ""; + + if (statusBuilder_ == null) { + status_ = null; + } else { + status_ = null; + statusBuilder_ = null; + } + if (statusHistoryBuilder_ == null) { + statusHistory_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + statusHistoryBuilder_.clear(); + } + operationType_ = ""; + + description_ = ""; + + internalGetMutableLabels().clear(); + warnings_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000080); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata build() { + com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata 
buildPartial() { + com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata result = new com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.clusterName_ = clusterName_; + result.clusterUuid_ = clusterUuid_; + if (statusBuilder_ == null) { + result.status_ = status_; + } else { + result.status_ = statusBuilder_.build(); + } + if (statusHistoryBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + statusHistory_ = java.util.Collections.unmodifiableList(statusHistory_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.statusHistory_ = statusHistory_; + } else { + result.statusHistory_ = statusHistoryBuilder_.build(); + } + result.operationType_ = operationType_; + result.description_ = description_; + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + if (((bitField0_ & 0x00000080) == 0x00000080)) { + warnings_ = warnings_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.warnings_ = warnings_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata other) { + if (other == com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.getDefaultInstance()) return this; + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (!other.getClusterUuid().isEmpty()) { + clusterUuid_ = other.clusterUuid_; + onChanged(); + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + if (statusHistoryBuilder_ == null) { + if (!other.statusHistory_.isEmpty()) { + if (statusHistory_.isEmpty()) { + statusHistory_ = other.statusHistory_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureStatusHistoryIsMutable(); + statusHistory_.addAll(other.statusHistory_); + } + onChanged(); + } + } else { + if (!other.statusHistory_.isEmpty()) { + if (statusHistoryBuilder_.isEmpty()) { + statusHistoryBuilder_.dispose(); + statusHistoryBuilder_ = null; + statusHistory_ = other.statusHistory_; + bitField0_ = (bitField0_ & ~0x00000008); + statusHistoryBuilder_ = + 
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getStatusHistoryFieldBuilder() : null; + } else { + statusHistoryBuilder_.addAllMessages(other.statusHistory_); + } + } + } + if (!other.getOperationType().isEmpty()) { + operationType_ = other.operationType_; + onChanged(); + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + onChanged(); + } + internalGetMutableLabels().mergeFrom( + other.internalGetLabels()); + if (!other.warnings_.isEmpty()) { + if (warnings_.isEmpty()) { + warnings_ = other.warnings_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureWarningsIsMutable(); + warnings_.addAll(other.warnings_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Output only. Name of the cluster for the operation.
+     * 
+ * + * string cluster_name = 7; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. Name of the cluster for the operation.
+     * 
+ * + * string cluster_name = 7; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. Name of the cluster for the operation.
+     * 
+ * + * string cluster_name = 7; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Name of the cluster for the operation.
+     * 
+ * + * string cluster_name = 7; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Output only. Name of the cluster for the operation.
+     * 
+ * + * string cluster_name = 7; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterUuid_ = ""; + /** + *
+     * Output only. Cluster UUID for the operation.
+     * 
+ * + * string cluster_uuid = 8; + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. Cluster UUID for the operation.
+     * 
+ * + * string cluster_uuid = 8; + */ + public com.google.protobuf.ByteString + getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. Cluster UUID for the operation.
+     * 
+ * + * string cluster_uuid = 8; + */ + public Builder setClusterUuid( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterUuid_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Cluster UUID for the operation.
+     * 
+ * + * string cluster_uuid = 8; + */ + public Builder clearClusterUuid() { + + clusterUuid_ = getDefaultInstance().getClusterUuid(); + onChanged(); + return this; + } + /** + *
+     * Output only. Cluster UUID for the operation.
+     * 
+ * + * string cluster_uuid = 8; + */ + public Builder setClusterUuidBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterUuid_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.ClusterOperationStatus status_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder> statusBuilder_; + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public boolean hasStatus() { + return statusBuilder_ != null || status_ != null; + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public Builder setStatus(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + onChanged(); + } else { + statusBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public Builder setStatus( + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + onChanged(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public Builder mergeStatus(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus value) { + if (statusBuilder_ == null) { + if (status_ != null) { + status_ = + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.newBuilder(status_).mergeFrom(value).buildPartial(); + } else { + status_ = value; + } + onChanged(); + } else { + statusBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public Builder clearStatus() { + if (statusBuilder_ == null) { + status_ = null; + onChanged(); + } else { + status_ = null; + statusBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder getStatusBuilder() { + + onChanged(); + return getStatusFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDefaultInstance() : status_; + } + } + /** + *
+     * Output only. Current operation status.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder> + getStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder>( + getStatus(), + getParentForChildren(), + isClean()); + status_ = null; + } + return statusBuilder_; + } + + private java.util.List statusHistory_ = + java.util.Collections.emptyList(); + private void ensureStatusHistoryIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + statusHistory_ = new java.util.ArrayList(statusHistory_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder> statusHistoryBuilder_; + + /** + *
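
A minimal sketch of the single-field-builder pattern wired up above: getStatusBuilder() edits the nested status message in place instead of building a ClusterOperationStatus separately and calling setStatus(). It assumes the generated v1beta2 classes are on the classpath; the class name and state value are illustrative.

    import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata;
    import com.google.cloud.dataproc.v1beta2.ClusterOperationStatus;

    class StatusBuilderSketch {
      static ClusterOperationMetadata inProgress() {
        ClusterOperationMetadata.Builder metadata = ClusterOperationMetadata.newBuilder();
        // getStatusBuilder() lazily creates the nested message and marks the
        // field present, so hasStatus() is true afterwards.
        metadata.getStatusBuilder()
            .setState(ClusterOperationStatus.State.RUNNING);
        return metadata.build();
      }
    }
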
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public java.util.List getStatusHistoryList() { + if (statusHistoryBuilder_ == null) { + return java.util.Collections.unmodifiableList(statusHistory_); + } else { + return statusHistoryBuilder_.getMessageList(); + } + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public int getStatusHistoryCount() { + if (statusHistoryBuilder_ == null) { + return statusHistory_.size(); + } else { + return statusHistoryBuilder_.getCount(); + } + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getStatusHistory(int index) { + if (statusHistoryBuilder_ == null) { + return statusHistory_.get(index); + } else { + return statusHistoryBuilder_.getMessage(index); + } + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder setStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus value) { + if (statusHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatusHistoryIsMutable(); + statusHistory_.set(index, value); + onChanged(); + } else { + statusHistoryBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder setStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder builderForValue) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.set(index, builderForValue.build()); + onChanged(); + } else { + statusHistoryBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder addStatusHistory(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus value) { + if (statusHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatusHistoryIsMutable(); + statusHistory_.add(value); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder addStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus value) { + if (statusHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatusHistoryIsMutable(); + statusHistory_.add(index, value); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder addStatusHistory( + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder builderForValue) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.add(builderForValue.build()); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder addStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder builderForValue) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.add(index, builderForValue.build()); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder addAllStatusHistory( + java.lang.Iterable values) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, statusHistory_); + onChanged(); + } else { + statusHistoryBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder clearStatusHistory() { + if (statusHistoryBuilder_ == null) { + statusHistory_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + statusHistoryBuilder_.clear(); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public Builder removeStatusHistory(int index) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.remove(index); + onChanged(); + } else { + statusHistoryBuilder_.remove(index); + } + return this; + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder getStatusHistoryBuilder( + int index) { + return getStatusHistoryFieldBuilder().getBuilder(index); + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder getStatusHistoryOrBuilder( + int index) { + if (statusHistoryBuilder_ == null) { + return statusHistory_.get(index); } else { + return statusHistoryBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public java.util.List + getStatusHistoryOrBuilderList() { + if (statusHistoryBuilder_ != null) { + return statusHistoryBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(statusHistory_); + } + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder addStatusHistoryBuilder() { + return getStatusHistoryFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDefaultInstance()); + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder addStatusHistoryBuilder( + int index) { + return getStatusHistoryFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDefaultInstance()); + } + /** + *
+     * Output only. The previous operation status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + public java.util.List + getStatusHistoryBuilderList() { + return getStatusHistoryFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder> + getStatusHistoryFieldBuilder() { + if (statusHistoryBuilder_ == null) { + statusHistoryBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder>( + statusHistory_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + statusHistory_ = null; + } + return statusHistoryBuilder_; + } + + private java.lang.Object operationType_ = ""; + /** + *
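
The status_history accessors above are the standard RepeatedFieldBuilderV3 surface for a repeated message field. A short sketch, with an illustrative class name, of appending history entries through the per-element builders:

    import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata;
    import com.google.cloud.dataproc.v1beta2.ClusterOperationStatus;

    class StatusHistorySketch {
      static ClusterOperationMetadata withHistory() {
        ClusterOperationMetadata.Builder metadata = ClusterOperationMetadata.newBuilder();
        // Each addStatusHistoryBuilder() call appends a new element and
        // returns its builder, so prior states can be recorded oldest-first.
        metadata.addStatusHistoryBuilder().setState(ClusterOperationStatus.State.PENDING);
        metadata.addStatusHistoryBuilder().setState(ClusterOperationStatus.State.RUNNING);
        return metadata.build();
      }
    }
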
+     * Output only. The operation type.
+     * 
+ * + * string operation_type = 11; + */ + public java.lang.String getOperationType() { + java.lang.Object ref = operationType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The operation type.
+     * 
+ * + * string operation_type = 11; + */ + public com.google.protobuf.ByteString + getOperationTypeBytes() { + java.lang.Object ref = operationType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + operationType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The operation type.
+     * 
+ * + * string operation_type = 11; + */ + public Builder setOperationType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + operationType_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The operation type.
+     * 
+ * + * string operation_type = 11; + */ + public Builder clearOperationType() { + + operationType_ = getDefaultInstance().getOperationType(); + onChanged(); + return this; + } + /** + *
+     * Output only. The operation type.
+     * 
+ * + * string operation_type = 11; + */ + public Builder setOperationTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + operationType_ = value; + onChanged(); + return this; + } + + private java.lang.Object description_ = ""; + /** + *
+     * Output only. Short description of operation.
+     * 
+ * + * string description = 12; + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. Short description of operation.
+     * 
+ * + * string description = 12; + */ + public com.google.protobuf.ByteString + getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. Short description of operation.
+     * 
+ * + * string description = 12; + */ + public Builder setDescription( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + description_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Short description of operation.
+     * 
+ * + * string description = 12; + */ + public Builder clearDescription() { + + description_ = getDefaultInstance().getDescription(); + onChanged(); + return this; + } + /** + *
+     * Output only. Short description of operation.
+     * 
+ * + * string description = 12; + */ + public Builder setDescriptionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + description_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + private com.google.protobuf.MapField + internalGetMutableLabels() { + onChanged();; + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+     * Output only. Labels associated with the operation.
+     * 
+ * + * map<string, string> labels = 13; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+     * Output only. Labels associated with the operation.
+     * 
+ * + * map<string, string> labels = 13; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+     * Output only. Labels associated with the operation.
+     * 
+ * + * map<string, string> labels = 13; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Output only. Labels associated with the operation.
+     * 
+ * + * map<string, string> labels = 13; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + internalGetMutableLabels().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Output only. Labels associated with the operation.
+     * 
+ * + * map<string, string> labels = 13; + */ + + public Builder removeLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableLabels() { + return internalGetMutableLabels().getMutableMap(); + } + /** + *
+     * Output only. Labels associated with the operation.
+     * 
+ * + * map<string, string> labels = 13; + */ + public Builder putLabels( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Output only. Labels associated with the operation.
+     * 
+ * + * map<string, string> labels = 13; + */ + + public Builder putAllLabels( + java.util.Map values) { + internalGetMutableLabels().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.LazyStringList warnings_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureWarningsIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + warnings_ = new com.google.protobuf.LazyStringArrayList(warnings_); + bitField0_ |= 0x00000080; + } + } + /** + *
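
Because labels is a proto3 map field, the generated surface is put/contains/get-or-default style rather than an exposed mutable map (getLabels() and getMutableLabels() are deprecated above). A short sketch with hypothetical label keys:

    import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata;

    class LabelsSketch {
      static void demo() {
        ClusterOperationMetadata metadata = ClusterOperationMetadata.newBuilder()
            .putLabels("env", "dev")    // hypothetical labels
            .putLabels("team", "data")
            .build();
        // Prefer the map-style readers over the deprecated accessors.
        System.out.println(metadata.containsLabels("env"));            // true
        System.out.println(metadata.getLabelsOrDefault("owner", "-")); // "-"
        System.out.println(metadata.getLabelsMap().size());            // 2
      }
    }
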
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public com.google.protobuf.ProtocolStringList + getWarningsList() { + return warnings_.getUnmodifiableView(); + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public int getWarningsCount() { + return warnings_.size(); + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public java.lang.String getWarnings(int index) { + return warnings_.get(index); + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public com.google.protobuf.ByteString + getWarningsBytes(int index) { + return warnings_.getByteString(index); + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public Builder setWarnings( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWarningsIsMutable(); + warnings_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public Builder addWarnings( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWarningsIsMutable(); + warnings_.add(value); + onChanged(); + return this; + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public Builder addAllWarnings( + java.lang.Iterable values) { + ensureWarningsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, warnings_); + onChanged(); + return this; + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public Builder clearWarnings() { + warnings_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + /** + *
+     * Output only. Errors encountered during operation execution.
+     * 
+ * + * repeated string warnings = 14; + */ + public Builder addWarningsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureWarningsIsMutable(); + warnings_.add(value); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ClusterOperationMetadata) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterOperationMetadata) + private static final com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata(); + } + + public static com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ClusterOperationMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClusterOperationMetadata(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadataOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadataOrBuilder.java new file mode 100644 index 000000000000..73a27b4c646b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadataOrBuilder.java @@ -0,0 +1,239 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/operations.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ClusterOperationMetadataOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ClusterOperationMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + *
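
That completes the ClusterOperationMetadata message class. In practice this message typically arrives packed in the metadata Any of a google.longrunning.Operation returned by the cluster RPCs; a hedged sketch of unpacking it, assuming the google.longrunning stubs are on the classpath (the helper name is illustrative):

    import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata;
    import com.google.longrunning.Operation;
    import com.google.protobuf.InvalidProtocolBufferException;

    class OperationMetadataSketch {
      // Unpack the operation's metadata Any to inspect cluster-operation progress.
      static ClusterOperationMetadata metadataOf(Operation op)
          throws InvalidProtocolBufferException {
        return op.getMetadata().unpack(ClusterOperationMetadata.class);
      }
    }
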
+   * Output only. Name of the cluster for the operation.
+   * 
+ * + * string cluster_name = 7; + */ + java.lang.String getClusterName(); + /** + *
+   * Output only. Name of the cluster for the operation.
+   * 
+ * + * string cluster_name = 7; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Output only. Cluster UUID for the operation.
+   * 
+ * + * string cluster_uuid = 8; + */ + java.lang.String getClusterUuid(); + /** + *
+   * Output only. Cluster UUID for the operation.
+   * 
+ * + * string cluster_uuid = 8; + */ + com.google.protobuf.ByteString + getClusterUuidBytes(); + + /** + *
+   * Output only. Current operation status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + boolean hasStatus(); + /** + *
+   * Output only. Current operation status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getStatus(); + /** + *
+   * Output only. Current operation status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus status = 9; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder getStatusOrBuilder(); + + /** + *
+   * Output only. The previous operation status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + java.util.List + getStatusHistoryList(); + /** + *
+   * Output only. The previous operation status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getStatusHistory(int index); + /** + *
+   * Output only. The previous operation status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + int getStatusHistoryCount(); + /** + *
+   * Output only. The previous operation status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + java.util.List + getStatusHistoryOrBuilderList(); + /** + *
+   * Output only. The previous operation status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterOperationStatus status_history = 10; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder getStatusHistoryOrBuilder( + int index); + + /** + *
+   * Output only. The operation type.
+   * 
+ * + * string operation_type = 11; + */ + java.lang.String getOperationType(); + /** + *
+   * Output only. The operation type.
+   * 
+ * + * string operation_type = 11; + */ + com.google.protobuf.ByteString + getOperationTypeBytes(); + + /** + *
+   * Output only. Short description of operation.
+   * 
+ * + * string description = 12; + */ + java.lang.String getDescription(); + /** + *
+   * Output only. Short description of operation.
+   * 
+ * + * string description = 12; + */ + com.google.protobuf.ByteString + getDescriptionBytes(); + + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + int getLabelsCount(); + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + boolean containsLabels( + java.lang.String key); + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getLabels(); + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + java.util.Map + getLabelsMap(); + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + + java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Output only. Labels associated with the operation.
+   * 
+ * + * map<string, string> labels = 13; + */ + + java.lang.String getLabelsOrThrow( + java.lang.String key); + + /** + *
+   * Output only. Errors encountered during operation execution.
+   * 
+ * + * repeated string warnings = 14; + */ + java.util.List + getWarningsList(); + /** + *
+   * Output only. Errors encountered during operation execution.
+   * 
+ * + * repeated string warnings = 14; + */ + int getWarningsCount(); + /** + *
+   * Output only. Errors encountered during operation execution.
+   * 
+ * + * repeated string warnings = 14; + */ + java.lang.String getWarnings(int index); + /** + *
+   * Output only. Errors encountered during operation execution.
+   * 
+ * + * repeated string warnings = 14; + */ + com.google.protobuf.ByteString + getWarningsBytes(int index); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationOrBuilder.java new file mode 100644 index 000000000000..00e03c6d2137 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationOrBuilder.java @@ -0,0 +1,54 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ClusterOperationOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ClusterOperation) + com.google.protobuf.MessageOrBuilder { + + /** + *
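
Both the generated message and its Builder implement this read-only OrBuilder interface, so helper code can be written once against it and accept either. A sketch with an illustrative helper name:

    import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadataOrBuilder;

    class MetadataSummary {
      // Accepts a ClusterOperationMetadata or its Builder, since both
      // implement the read-only interface declared above.
      static String summarize(ClusterOperationMetadataOrBuilder m) {
        return m.getOperationType() + " on " + m.getClusterName()
            + " (" + m.getWarningsCount() + " warnings, "
            + m.getStatusHistoryCount() + " prior states)";
      }
    }
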
+   * Output only. The ID of the cluster operation.
+   * 
+ * + * string operation_id = 1; + */ + java.lang.String getOperationId(); + /** + *
+   * Output only. The ID of the cluster operation.
+   * 
+ * + * string operation_id = 1; + */ + com.google.protobuf.ByteString + getOperationIdBytes(); + + /** + *
+   * Output only. Error, if operation failed.
+   * 
+ * + * string error = 2; + */ + java.lang.String getError(); + /** + *
+   * Output only. Error, if operation failed.
+   * 
+ * + * string error = 2; + */ + com.google.protobuf.ByteString + getErrorBytes(); + + /** + *
+   * Output only. Indicates the operation is done.
+   * 
+ * + * bool done = 3; + */ + boolean getDone(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatus.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatus.java new file mode 100644 index 000000000000..69f1d3e09a47 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatus.java @@ -0,0 +1,1226 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/operations.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
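
A minimal sketch of how a caller might interpret the done/error pair on a ClusterOperation (the failure convention shown, a non-empty error string once done is true, is an assumption, and the helper name is illustrative):

    import com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder;

    class ClusterOperationCheck {
      // done=true with a non-empty error signals failure;
      // done=true with an empty error signals success.
      static boolean failed(ClusterOperationOrBuilder op) {
        return op.getDone() && !op.getError().isEmpty();
      }
    }
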
+ * The status of the operation.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterOperationStatus} + */ +public final class ClusterOperationStatus extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ClusterOperationStatus) + ClusterOperationStatusOrBuilder { +private static final long serialVersionUID = 0L; + // Use ClusterOperationStatus.newBuilder() to construct. + private ClusterOperationStatus(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClusterOperationStatus() { + state_ = 0; + innerState_ = ""; + details_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClusterOperationStatus( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + innerState_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + details_ = s; + break; + } + case 34: { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (stateStartTime_ != null) { + subBuilder = stateStartTime_.toBuilder(); + } + stateStartTime_ = input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stateStartTime_); + stateStartTime_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.class, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder.class); + } + + /** + *
+   * The operation state.
+   * 
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.ClusterOperationStatus.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * Unused.
+     * 
+ * + * UNKNOWN = 0; + */ + UNKNOWN(0), + /** + *
+     * The operation has been created.
+     * 
+ * + * PENDING = 1; + */ + PENDING(1), + /** + *
+     * The operation is running.
+     * 
+ * + * RUNNING = 2; + */ + RUNNING(2), + /** + *
+     * The operation is done; either cancelled or completed.
+     * 
+ * + * DONE = 3; + */ + DONE(3), + UNRECOGNIZED(-1), + ; + + /** + *
+     * Unused.
+     * 
+ * + * UNKNOWN = 0; + */ + public static final int UNKNOWN_VALUE = 0; + /** + *
+     * The operation has been created.
+     * 
+ * + * PENDING = 1; + */ + public static final int PENDING_VALUE = 1; + /** + *
+     * The operation is running.
+     * 
+ * + * RUNNING = 2; + */ + public static final int RUNNING_VALUE = 2; + /** + *
+     * The operation is done; either cancelled or completed.
+     * 
+ * + * DONE = 3; + */ + public static final int DONE_VALUE = 3; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + public static State forNumber(int value) { + switch (value) { + case 0: return UNKNOWN; + case 1: return PENDING; + case 2: return RUNNING; + case 3: return DONE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + State> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.ClusterOperationStatus.State) + } + + public static final int STATE_FIELD_NUMBER = 1; + private int state_; + /** + *
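
Because this is a proto3 enum, wire values newer than this generated code surface as UNRECOGNIZED from getState(), while the raw number remains available via getStateValue(). A sketch, with an illustrative helper, of handling that case:

    import com.google.cloud.dataproc.v1beta2.ClusterOperationStatus;

    class StateSketch {
      static boolean isTerminal(ClusterOperationStatus status) {
        switch (status.getState()) {
          case DONE:
            return true;
          case UNKNOWN:
          case PENDING:
          case RUNNING:
            return false;
          default:
            // UNRECOGNIZED: the wire value postdates this generated enum;
            // fall back to the raw number if finer handling is needed.
            throw new IllegalArgumentException(
                "Unexpected state number: " + status.getStateValue());
        }
      }
    }
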
+   * Output only. A message containing the operation state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + public int getStateValue() { + return state_; + } + /** + *
+   * Output only. A message containing the operation state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State result = com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State.UNRECOGNIZED : result; + } + + public static final int INNER_STATE_FIELD_NUMBER = 2; + private volatile java.lang.Object innerState_; + /** + *
+   * Output only. A message containing the detailed operation state.
+   * 
+ * + * string inner_state = 2; + */ + public java.lang.String getInnerState() { + java.lang.Object ref = innerState_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + innerState_ = s; + return s; + } + } + /** + *
+   * Output only. A message containing the detailed operation state.
+   * 
+ * + * string inner_state = 2; + */ + public com.google.protobuf.ByteString + getInnerStateBytes() { + java.lang.Object ref = innerState_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + innerState_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DETAILS_FIELD_NUMBER = 3; + private volatile java.lang.Object details_; + /** + *
+   * Output only. A message containing any operation metadata details.
+   * 
+ * + * string details = 3; + */ + public java.lang.String getDetails() { + java.lang.Object ref = details_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + details_ = s; + return s; + } + } + /** + *
+   * Output only. A message containing any operation metadata details.
+   * 
+ * + * string details = 3; + */ + public com.google.protobuf.ByteString + getDetailsBytes() { + java.lang.Object ref = details_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + details_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_START_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp stateStartTime_; + /** + *
+   * Output only. The time this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public boolean hasStateStartTime() { + return stateStartTime_ != null; + } + /** + *
+   * Output only. The time this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public com.google.protobuf.Timestamp getStateStartTime() { + return stateStartTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_; + } + /** + *
+   * Output only. The time this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() { + return getStateStartTime(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (state_ != com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State.UNKNOWN.getNumber()) { + output.writeEnum(1, state_); + } + if (!getInnerStateBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, innerState_); + } + if (!getDetailsBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, details_); + } + if (stateStartTime_ != null) { + output.writeMessage(4, getStateStartTime()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (state_ != com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State.UNKNOWN.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_); + } + if (!getInnerStateBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, innerState_); + } + if (!getDetailsBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, details_); + } + if (stateStartTime_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getStateStartTime()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ClusterOperationStatus)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus other = (com.google.cloud.dataproc.v1beta2.ClusterOperationStatus) obj; + + boolean result = true; + result = result && state_ == other.state_; + result = result && getInnerState() + .equals(other.getInnerState()); + result = result && getDetails() + .equals(other.getDetails()); + result = result && (hasStateStartTime() == other.hasStateStartTime()); + if (hasStateStartTime()) { + result = result && getStateStartTime() + .equals(other.getStateStartTime()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + INNER_STATE_FIELD_NUMBER; + hash = (53 * hash) + getInnerState().hashCode(); + hash = (37 * hash) + DETAILS_FIELD_NUMBER; + hash = (53 * hash) + getDetails().hashCode(); + if (hasStateStartTime()) { + hash = (37 * hash) + STATE_START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStateStartTime().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
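
With writeTo, getSerializedSize, equals/hashCode, and the parseFrom overloads in place above, the message round-trips through bytes. A small sketch (the details text is hypothetical):

    import com.google.cloud.dataproc.v1beta2.ClusterOperationStatus;
    import com.google.protobuf.InvalidProtocolBufferException;

    class RoundTripSketch {
      static void demo() throws InvalidProtocolBufferException {
        ClusterOperationStatus status = ClusterOperationStatus.newBuilder()
            .setState(ClusterOperationStatus.State.DONE)
            .setDetails("Operation completed.")  // hypothetical detail text
            .build();
        byte[] wire = status.toByteArray();
        // parseFrom(byte[]) round-trips losslessly; value-based equals() holds.
        ClusterOperationStatus parsed = ClusterOperationStatus.parseFrom(wire);
        System.out.println(status.equals(parsed));  // true
      }
    }
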
+   * The status of the operation.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterOperationStatus} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ClusterOperationStatus) + com.google.cloud.dataproc.v1beta2.ClusterOperationStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.class, com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + state_ = 0; + + innerState_ = ""; + + details_ = ""; + + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = null; + } else { + stateStartTime_ = null; + stateStartTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.OperationsProto.internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus build() { + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus buildPartial() { + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus result = new com.google.cloud.dataproc.v1beta2.ClusterOperationStatus(this); + result.state_ = state_; + result.innerState_ = innerState_; + result.details_ = details_; + if (stateStartTimeBuilder_ == null) { + result.stateStartTime_ = stateStartTime_; + } else { + result.stateStartTime_ = stateStartTimeBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + 
com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ClusterOperationStatus) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterOperationStatus)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus other) { + if (other == com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.getDefaultInstance()) return this; + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.getInnerState().isEmpty()) { + innerState_ = other.innerState_; + onChanged(); + } + if (!other.getDetails().isEmpty()) { + details_ = other.details_; + onChanged(); + } + if (other.hasStateStartTime()) { + mergeStateStartTime(other.getStateStartTime()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ClusterOperationStatus) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int state_ = 0; + /** + *
+     * Output only. A message containing the operation state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + public int getStateValue() { + return state_; + } + /** + *
+     * Output only. A message containing the operation state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + public Builder setStateValue(int value) { + state_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. A message containing the operation state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State result = com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State.UNRECOGNIZED : result; + } + /** + *
+     * Output only. A message containing the operation state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + public Builder setState(com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State value) { + if (value == null) { + throw new NullPointerException(); + } + + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Output only. A message containing the operation state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + public Builder clearState() { + + state_ = 0; + onChanged(); + return this; + } + + private java.lang.Object innerState_ = ""; + /** + *
+     * Output only. A message containing the detailed operation state.
+     * 
+ * + * string inner_state = 2; + */ + public java.lang.String getInnerState() { + java.lang.Object ref = innerState_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + innerState_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. A message containing the detailed operation state.
+     * 
+ * + * string inner_state = 2; + */ + public com.google.protobuf.ByteString + getInnerStateBytes() { + java.lang.Object ref = innerState_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + innerState_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. A message containing the detailed operation state.
+     * 
+ * + * string inner_state = 2; + */ + public Builder setInnerState( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + innerState_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. A message containing the detailed operation state.
+     * 
+ * + * string inner_state = 2; + */ + public Builder clearInnerState() { + + innerState_ = getDefaultInstance().getInnerState(); + onChanged(); + return this; + } + /** + *
+     * Output only. The detailed operation state.
+     * 
+ * + * string inner_state = 2; + */ + public Builder setInnerStateBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + innerState_ = value; + onChanged(); + return this; + } + + private java.lang.Object details_ = ""; + /** + *
+     * Output only. Any operation metadata details.
+     * 
+ * + * string details = 3; + */ + public java.lang.String getDetails() { + java.lang.Object ref = details_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + details_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. Any operation metadata details.
+     * 
+ * + * string details = 3; + */ + public com.google.protobuf.ByteString + getDetailsBytes() { + java.lang.Object ref = details_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + details_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. Any operation metadata details.
+     * 
+ * + * string details = 3; + */ + public Builder setDetails( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + details_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Any operation metadata details.
+     * 
+ * + * string details = 3; + */ + public Builder clearDetails() { + + details_ = getDefaultInstance().getDetails(); + onChanged(); + return this; + } + /** + *
+     * Output only. Any operation metadata details.
+     * 
+ * + * string details = 3; + */ + public Builder setDetailsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + details_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp stateStartTime_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> stateStartTimeBuilder_; + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public boolean hasStateStartTime() { + return stateStartTimeBuilder_ != null || stateStartTime_ != null; + } + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public com.google.protobuf.Timestamp getStateStartTime() { + if (stateStartTimeBuilder_ == null) { + return stateStartTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_; + } else { + return stateStartTimeBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public Builder setStateStartTime(com.google.protobuf.Timestamp value) { + if (stateStartTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stateStartTime_ = value; + onChanged(); + } else { + stateStartTimeBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public Builder setStateStartTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = builderForValue.build(); + onChanged(); + } else { + stateStartTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The time this state was entered.
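+     * For illustration only (the seconds value and the {@code builder}
+     * variable are assumed examples): unlike {@code setStateStartTime},
+     * merge combines the given timestamp with any value already present:
+     *   builder.mergeStateStartTime(
+     *       com.google.protobuf.Timestamp.newBuilder().setSeconds(1533081600L).build());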
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public Builder mergeStateStartTime(com.google.protobuf.Timestamp value) { + if (stateStartTimeBuilder_ == null) { + if (stateStartTime_ != null) { + stateStartTime_ = + com.google.protobuf.Timestamp.newBuilder(stateStartTime_).mergeFrom(value).buildPartial(); + } else { + stateStartTime_ = value; + } + onChanged(); + } else { + stateStartTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public Builder clearStateStartTime() { + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = null; + onChanged(); + } else { + stateStartTime_ = null; + stateStartTimeBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getStateStartTimeBuilder() { + + onChanged(); + return getStateStartTimeFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() { + if (stateStartTimeBuilder_ != null) { + return stateStartTimeBuilder_.getMessageOrBuilder(); + } else { + return stateStartTime_ == null ? + com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_; + } + } + /** + *
+     * Output only. The time this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> + getStateStartTimeFieldBuilder() { + if (stateStartTimeBuilder_ == null) { + stateStartTimeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( + getStateStartTime(), + getParentForChildren(), + isClean()); + stateStartTime_ = null; + } + return stateStartTimeBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ClusterOperationStatus) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterOperationStatus) + private static final com.google.cloud.dataproc.v1beta2.ClusterOperationStatus DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ClusterOperationStatus(); + } + + public static com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ClusterOperationStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClusterOperationStatus(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterOperationStatus getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatusOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatusOrBuilder.java new file mode 100644 index 000000000000..c9ecdcd57d63 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatusOrBuilder.java @@ -0,0 +1,87 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/operations.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ClusterOperationStatusOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ClusterOperationStatus) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The operation state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + int getStateValue(); + /** + *
+   * Output only. The operation state.
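+   * For illustration only ({@code status} is an assumed variable of this
+   * message type): enum getters return {@code UNRECOGNIZED} for wire values
+   * newer than this stub, so callers can fall back to the raw number:
+   *   ClusterOperationStatus.State state = status.getState();
+   *   int raw = (state == ClusterOperationStatus.State.UNRECOGNIZED)
+   *       ? status.getStateValue() : state.getNumber();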
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperationStatus.State state = 1; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State getState(); + + /** + *
+   * Output only. The detailed operation state.
+   * 
+ * + * string inner_state = 2; + */ + java.lang.String getInnerState(); + /** + *
+   * Output only. The detailed operation state.
+   * 
+ * + * string inner_state = 2; + */ + com.google.protobuf.ByteString + getInnerStateBytes(); + + /** + *
+   * Output only. Any operation metadata details.
+   * 
+ * + * string details = 3; + */ + java.lang.String getDetails(); + /** + *
+   * Output only. Any operation metadata details.
+   * 
+ * + * string details = 3; + */ + com.google.protobuf.ByteString + getDetailsBytes(); + + /** + *
+   * Output only. The time this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + boolean hasStateStartTime(); + /** + *
+   * Output only. The time this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + com.google.protobuf.Timestamp getStateStartTime(); + /** + *
+   * Output only. The time this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java new file mode 100644 index 000000000000..d4daae5608a7 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java @@ -0,0 +1,274 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ClusterOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.Cluster) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The Google Cloud Platform project ID that the cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The Google Cloud Platform project ID that the cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The cluster name. Cluster names within a project must be
+   * unique. Names of deleted clusters can be reused.
+   * 
+ * + * string cluster_name = 2; + */ + java.lang.String getClusterName(); + /** + *
+   * Required. The cluster name. Cluster names within a project must be
+   * unique. Names of deleted clusters can be reused.
+   * 
+ * + * string cluster_name = 2; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Required. The cluster config. Note that Cloud Dataproc may set
+   * default values, and values may change when clusters are updated.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + boolean hasConfig(); + /** + *
+   * Required. The cluster config. Note that Cloud Dataproc may set
+   * default values, and values may change when clusters are updated.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig(); + /** + *
+   * Required. The cluster config. Note that Cloud Dataproc may set
+   * default values, and values may change when clusters are updated.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder(); + + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
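+   * For illustration only (assuming an existing {@code Cluster cluster} and
+   * an example label key):
+   *   String env = cluster.getLabelsOrDefault("env", "none");
+   *   // getLabelsOrThrow("env") would throw IllegalArgumentException if absent.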
+   * 
+ * + * map<string, string> labels = 8; + */ + int getLabelsCount(); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
+ * + * map<string, string> labels = 8; + */ + boolean containsLabels( + java.lang.String key); + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getLabels(); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
+ * + * map<string, string> labels = 8; + */ + java.util.Map + getLabelsMap(); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
+ * + * map<string, string> labels = 8; + */ + + java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a cluster.
+   * 
+ * + * map<string, string> labels = 8; + */ + + java.lang.String getLabelsOrThrow( + java.lang.String key); + + /** + *
+   * Output only. Cluster status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4; + */ + boolean hasStatus(); + /** + *
+   * Output only. Cluster status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4; + */ + com.google.cloud.dataproc.v1beta2.ClusterStatus getStatus(); + /** + *
+   * Output only. Cluster status.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus status = 4; + */ + com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusOrBuilder(); + + /** + *
+   * Output only. The previous cluster statuses.
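+   * For illustration only (assuming an existing {@code Cluster cluster}):
+   *   for (ClusterStatus past : cluster.getStatusHistoryList()) {
+   *     System.out.println(past.getState() + " since " + past.getStateStartTime());
+   *   }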
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + java.util.List + getStatusHistoryList(); + /** + *
+   * Output only. The previous cluster statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + com.google.cloud.dataproc.v1beta2.ClusterStatus getStatusHistory(int index); + /** + *
+   * Output only. The previous cluster statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + int getStatusHistoryCount(); + /** + *
+   * Output only. The previous cluster statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + java.util.List + getStatusHistoryOrBuilderList(); + /** + *
+   * Output only. The previous cluster statuses.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7; + */ + com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusHistoryOrBuilder( + int index); + + /** + *
+   * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+   * generates this value when it creates the cluster.
+   * 
+ * + * string cluster_uuid = 6; + */ + java.lang.String getClusterUuid(); + /** + *
+   * Output only. A cluster UUID (Universally Unique Identifier). Cloud Dataproc
+   * generates this value when it creates the cluster.
+   * 
+ * + * string cluster_uuid = 6; + */ + com.google.protobuf.ByteString + getClusterUuidBytes(); + + /** + *
+   * Contains cluster daemon metrics such as HDFS and YARN stats.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
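+   * For illustration only (assuming an existing {@code Cluster cluster}):
+   *   if (cluster.hasMetrics()) {
+   *     ClusterMetrics metrics = cluster.getMetrics();
+   *   }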
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; + */ + boolean hasMetrics(); + /** + *
+   * Contains cluster daemon metrics such as HDFS and YARN stats.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; + */ + com.google.cloud.dataproc.v1beta2.ClusterMetrics getMetrics(); + /** + *
+   * Contains cluster daemon metrics such as HDFS and YARN stats.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; + */ + com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder getMetricsOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelector.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelector.java new file mode 100644 index 000000000000..0cb421173e4b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelector.java @@ -0,0 +1,931 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A selector that chooses the target cluster for jobs based on metadata.
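+ * For illustration only, a minimal construction sketch (the zone and the
+ * label key/value are assumed example data):
+ *   ClusterSelector selector = ClusterSelector.newBuilder()
+ *       .setZone("us-central1-a")
+ *       .putClusterLabels("env", "prod")
+ *       .build();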
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterSelector} + */ +public final class ClusterSelector extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ClusterSelector) + ClusterSelectorOrBuilder { +private static final long serialVersionUID = 0L; + // Use ClusterSelector.newBuilder() to construct. + private ClusterSelector(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClusterSelector() { + zone_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClusterSelector( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + zone_ = s; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + clusterLabels_ = com.google.protobuf.MapField.newMapField( + ClusterLabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000002; + } + com.google.protobuf.MapEntry + clusterLabels__ = input.readMessage( + ClusterLabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + clusterLabels_.getMutableMap().put( + clusterLabels__.getKey(), clusterLabels__.getValue()); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetClusterLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterSelector.class, com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder.class); + } + + private int bitField0_; + public static final int ZONE_FIELD_NUMBER = 1; + private volatile java.lang.Object zone_; + /** + *
+   * Optional. The zone where the workflow process executes. This parameter does not
+   * affect the selection of the cluster.
+   * If unspecified, the zone of the first cluster matching the selector
+   * is used.
+   * 
+ * + * string zone = 1; + */ + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } + } + /** + *
+   * Optional. The zone where the workflow process executes. This parameter does not
+   * affect the selection of the cluster.
+   * If unspecified, the zone of the first cluster matching the selector
+   * is used.
+   * 
+ * + * string zone = 1; + */ + public com.google.protobuf.ByteString + getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_LABELS_FIELD_NUMBER = 2; + private static final class ClusterLabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_ClusterLabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> clusterLabels_; + private com.google.protobuf.MapField + internalGetClusterLabels() { + if (clusterLabels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ClusterLabelsDefaultEntryHolder.defaultEntry); + } + return clusterLabels_; + } + + public int getClusterLabelsCount() { + return internalGetClusterLabels().getMap().size(); + } + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public boolean containsClusterLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetClusterLabels().getMap().containsKey(key); + } + /** + * Use {@link #getClusterLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getClusterLabels() { + return getClusterLabelsMap(); + } + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public java.util.Map getClusterLabelsMap() { + return internalGetClusterLabels().getMap(); + } + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public java.lang.String getClusterLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetClusterLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public java.lang.String getClusterLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetClusterLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getZoneBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, zone_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetClusterLabels(), + ClusterLabelsDefaultEntryHolder.defaultEntry, + 2); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getZoneBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, zone_); + } + for (java.util.Map.Entry entry + : internalGetClusterLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry + clusterLabels__ = ClusterLabelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, clusterLabels__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ClusterSelector)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ClusterSelector other = (com.google.cloud.dataproc.v1beta2.ClusterSelector) obj; + + boolean result = true; + result = result && getZone() + .equals(other.getZone()); + result = result && internalGetClusterLabels().equals( + other.internalGetClusterLabels()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ZONE_FIELD_NUMBER; + hash = (53 * hash) + getZone().hashCode(); + if (!internalGetClusterLabels().getMap().isEmpty()) { + hash = (37 * hash) + CLUSTER_LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetClusterLabels().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterSelector parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ClusterSelector prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A selector that chooses the target cluster for jobs based on metadata.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterSelector} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ClusterSelector) + com.google.cloud.dataproc.v1beta2.ClusterSelectorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetClusterLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 2: + return internalGetMutableClusterLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterSelector.class, com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ClusterSelector.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + zone_ = ""; + + internalGetMutableClusterLabels().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterSelector getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterSelector build() { + com.google.cloud.dataproc.v1beta2.ClusterSelector result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterSelector buildPartial() { + com.google.cloud.dataproc.v1beta2.ClusterSelector result = new com.google.cloud.dataproc.v1beta2.ClusterSelector(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.zone_ = zone_; + result.clusterLabels_ = internalGetClusterLabels(); + result.clusterLabels_.makeImmutable(); + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object 
value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ClusterSelector) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterSelector)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ClusterSelector other) { + if (other == com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance()) return this; + if (!other.getZone().isEmpty()) { + zone_ = other.zone_; + onChanged(); + } + internalGetMutableClusterLabels().mergeFrom( + other.internalGetClusterLabels()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ClusterSelector parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ClusterSelector) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object zone_ = ""; + /** + *
+     * Optional. The zone where the workflow process executes. This parameter does not
+     * affect the selection of the cluster.
+     * If unspecified, the zone of the first cluster matching the selector
+     * is used.
+     * 
+ * + * string zone = 1; + */ + public java.lang.String getZone() { + java.lang.Object ref = zone_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zone_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The zone where the workflow process executes. This parameter does not
+     * affect the selection of the cluster.
+     * If unspecified, the zone of the first cluster matching the selector
+     * is used.
+     * 
+ * + * string zone = 1; + */ + public com.google.protobuf.ByteString + getZoneBytes() { + java.lang.Object ref = zone_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + zone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The zone where the workflow process executes. This parameter does not
+     * affect the selection of the cluster.
+     * If unspecified, the zone of the first cluster matching the selector
+     * is used.
+     * 
+ * + * string zone = 1; + */ + public Builder setZone( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + zone_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The zone where the workflow process executes. This parameter does not
+     * affect the selection of the cluster.
+     * If unspecified, the zone of the first cluster matching the selector
+     * is used.
+     * 
+ * + * string zone = 1; + */ + public Builder clearZone() { + + zone_ = getDefaultInstance().getZone(); + onChanged(); + return this; + } + /** + *
+     * Optional. The zone where the workflow process executes. This parameter does not
+     * affect the selection of the cluster.
+     * If unspecified, the zone of the first cluster matching the selector
+     * is used.
+     * 
+ * + * string zone = 1; + */ + public Builder setZoneBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + zone_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> clusterLabels_; + private com.google.protobuf.MapField + internalGetClusterLabels() { + if (clusterLabels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ClusterLabelsDefaultEntryHolder.defaultEntry); + } + return clusterLabels_; + } + private com.google.protobuf.MapField + internalGetMutableClusterLabels() { + onChanged();; + if (clusterLabels_ == null) { + clusterLabels_ = com.google.protobuf.MapField.newMapField( + ClusterLabelsDefaultEntryHolder.defaultEntry); + } + if (!clusterLabels_.isMutable()) { + clusterLabels_ = clusterLabels_.copy(); + } + return clusterLabels_; + } + + public int getClusterLabelsCount() { + return internalGetClusterLabels().getMap().size(); + } + /** + *
+     * Required. The cluster labels. A cluster must have all of these labels
+     * to match.
+     * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public boolean containsClusterLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetClusterLabels().getMap().containsKey(key); + } + /** + * Use {@link #getClusterLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getClusterLabels() { + return getClusterLabelsMap(); + } + /** + *
+     * Required. The cluster labels. A cluster must have all of these labels
+     * to match.
+     * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public java.util.Map getClusterLabelsMap() { + return internalGetClusterLabels().getMap(); + } + /** + *
+     * Required. The cluster labels. A cluster must have all of these labels
+     * to match.
+     * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public java.lang.String getClusterLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetClusterLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Required. The cluster labels. A cluster must have all of these labels
+     * to match.
+     * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public java.lang.String getClusterLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetClusterLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearClusterLabels() { + internalGetMutableClusterLabels().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Required. The cluster labels. A cluster must have all of these labels
+     * to match.
+     * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public Builder removeClusterLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableClusterLabels().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableClusterLabels() { + return internalGetMutableClusterLabels().getMutableMap(); + } + /** + *
+     * Required. The cluster labels. A cluster must have all of these labels
+     * to match.
+     * 
+ * + * map<string, string> cluster_labels = 2; + */ + public Builder putClusterLabels( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableClusterLabels().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Required. The cluster labels. A cluster must have all of these labels
+     * to match.
+     * 
+ * + * map<string, string> cluster_labels = 2; + */ + + public Builder putAllClusterLabels( + java.util.Map values) { + internalGetMutableClusterLabels().getMutableMap() + .putAll(values); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ClusterSelector) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterSelector) + private static final com.google.cloud.dataproc.v1beta2.ClusterSelector DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ClusterSelector(); + } + + public static com.google.cloud.dataproc.v1beta2.ClusterSelector getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ClusterSelector parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClusterSelector(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterSelector getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelectorOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelectorOrBuilder.java new file mode 100644 index 000000000000..a25bfe127960 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelectorOrBuilder.java @@ -0,0 +1,92 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ClusterSelectorOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ClusterSelector) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. The zone where the workflow process executes. This parameter does not
+   * affect the selection of the cluster.
+   * If unspecified, the zone of the first cluster matching the selector
+   * is used.
+   * 
+ * + * string zone = 1; + */ + java.lang.String getZone(); + /** + *
+   * Optional. The zone where the workflow process executes. This parameter does not
+   * affect the selection of the cluster.
+   * If unspecified, the zone of the first cluster matching the selector
+   * is used.
+   * 
+ * + * string zone = 1; + */ + com.google.protobuf.ByteString + getZoneBytes(); + + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + int getClusterLabelsCount(); + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + boolean containsClusterLabels( + java.lang.String key); + /** + * Use {@link #getClusterLabelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getClusterLabels(); + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + java.util.Map + getClusterLabelsMap(); + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + + java.lang.String getClusterLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Required. The cluster labels. A cluster must have all of these labels
+   * to match.
+   * 
+ * + * map<string, string> cluster_labels = 2; + */ + + java.lang.String getClusterLabelsOrThrow( + java.lang.String key); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatus.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatus.java new file mode 100644 index 000000000000..b5c05475f07b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatus.java @@ -0,0 +1,1370 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * The status of a cluster and its instances.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterStatus} + */ +public final class ClusterStatus extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ClusterStatus) + ClusterStatusOrBuilder { +private static final long serialVersionUID = 0L; + // Use ClusterStatus.newBuilder() to construct. + private ClusterStatus(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClusterStatus() { + state_ = 0; + detail_ = ""; + substate_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClusterStatus( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + detail_ = s; + break; + } + case 26: { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (stateStartTime_ != null) { + subBuilder = stateStartTime_.toBuilder(); + } + stateStartTime_ = input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stateStartTime_); + stateStartTime_ = subBuilder.buildPartial(); + } + + break; + } + case 32: { + int rawValue = input.readEnum(); + + substate_ = rawValue; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterStatus.class, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder.class); + } + + /** + *
+   * The cluster state.
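+   * For illustration only: numeric wire values map back to constants through
+   * {@code forNumber}, which returns {@code null} for values unknown to this
+   * stub:
+   *   ClusterStatus.State s = ClusterStatus.State.forNumber(2); // RUNNING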
+   * 
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.ClusterStatus.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * The cluster state is unknown.
+     * 
+ * + * UNKNOWN = 0; + */ + UNKNOWN(0), + /** + *
+     * The cluster is being created and set up. It is not ready for use.
+     * 
+ * + * CREATING = 1; + */ + CREATING(1), + /** + *
+     * The cluster is currently running and healthy. It is ready for use.
+     * 
+ * + * RUNNING = 2; + */ + RUNNING(2), + /** + *
+     * The cluster encountered an error. It is not ready for use.
+     * 
+ * + * ERROR = 3; + */ + ERROR(3), + /** + *
+     * The cluster is being deleted. It cannot be used.
+     * 
+ * + * DELETING = 4; + */ + DELETING(4), + /** + *
+     * The cluster is being updated. It continues to accept and process jobs.
+     * 
+ * + * UPDATING = 5; + */ + UPDATING(5), + UNRECOGNIZED(-1), + ; + + /** + *
+     * The cluster state is unknown.
+     * 
+ * + * UNKNOWN = 0; + */ + public static final int UNKNOWN_VALUE = 0; + /** + *
+     * The cluster is being created and set up. It is not ready for use.
+     * 
+ * + * CREATING = 1; + */ + public static final int CREATING_VALUE = 1; + /** + *
+     * The cluster is currently running and healthy. It is ready for use.
+     * 
+ * + * RUNNING = 2; + */ + public static final int RUNNING_VALUE = 2; + /** + *
+     * The cluster encountered an error. It is not ready for use.
+     * 
+ * + * ERROR = 3; + */ + public static final int ERROR_VALUE = 3; + /** + *
+     * The cluster is being deleted. It cannot be used.
+     * 
+ * + * DELETING = 4; + */ + public static final int DELETING_VALUE = 4; + /** + *
+     * The cluster is being updated. It continues to accept and process jobs.
+     * 
+ * + * UPDATING = 5; + */ + public static final int UPDATING_VALUE = 5; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + public static State forNumber(int value) { + switch (value) { + case 0: return UNKNOWN; + case 1: return CREATING; + case 2: return RUNNING; + case 3: return ERROR; + case 4: return DELETING; + case 5: return UPDATING; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + State> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClusterStatus.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.ClusterStatus.State) + } + + /** + *
+   * The cluster substate.
+   * 
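+   *
+   * A minimal, illustrative sketch of reading this enum defensively; it is
+   * not part of the generated contract, and `status` is assumed to be a
+   * ClusterStatus instance obtained from the API:
+   *
+   *   ClusterStatus.Substate sub = status.getSubstate();
+   *   if (sub == ClusterStatus.Substate.UNHEALTHY) {
+   *     // RUNNING, but the agent reports problems (e.g. HDFS exhausted).
+   *   } else if (sub == ClusterStatus.Substate.UNRECOGNIZED) {
+   *     int raw = status.getSubstateValue();  // value from a newer server version
+   *   }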
+   * </pre>
+   *
+   * Protobuf enum {@code google.cloud.dataproc.v1beta2.ClusterStatus.Substate}
+   */
+  public enum Substate
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <pre>
+     * The cluster substate is unknown.
+     * 
+     * </pre>
+     *
+     * <code>UNSPECIFIED = 0;</code>
+     */
+    UNSPECIFIED(0),
+    /**
+     * <pre>
+     * The cluster is known to be in an unhealthy state
+     * (for example, critical daemons are not running or HDFS capacity is
+     * exhausted).
+     * Applies to the RUNNING state.
+     * 
+     * </pre>
+     *
+     * <code>UNHEALTHY = 1;</code>
+     */
+    UNHEALTHY(1),
+    /**
+     * <pre>
+     * The agent-reported status is out of date (this may occur if
+     * Cloud Dataproc loses communication with the agent).
+     * Applies to the RUNNING state.
+     * 
+     * </pre>
+     *
+     * <code>STALE_STATUS = 2;</code>
+     */
+    STALE_STATUS(2),
+    UNRECOGNIZED(-1),
+    ;
+
+    /**
+     * <pre>
+     * The cluster substate is unknown.
+     * 
+     * </pre>
+     *
+     * <code>UNSPECIFIED = 0;</code>
+     */
+    public static final int UNSPECIFIED_VALUE = 0;
+    /**
+     * <pre>
+     * The cluster is known to be in an unhealthy state
+     * (for example, critical daemons are not running or HDFS capacity is
+     * exhausted).
+     * Applies to the RUNNING state.
+     * 
+     * </pre>
+     *
+     * <code>UNHEALTHY = 1;</code>
+     */
+    public static final int UNHEALTHY_VALUE = 1;
+    /**
+     * <pre>
+     * The agent-reported status is out of date (this may occur if
+     * Cloud Dataproc loses communication with the agent).
+     * Applies to the RUNNING state.
+     * 
+ * + * STALE_STATUS = 2; + */ + public static final int STALE_STATUS_VALUE = 2; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Substate valueOf(int value) { + return forNumber(value); + } + + public static Substate forNumber(int value) { + switch (value) { + case 0: return UNSPECIFIED; + case 1: return UNHEALTHY; + case 2: return STALE_STATUS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + Substate> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Substate findValueByNumber(int number) { + return Substate.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClusterStatus.getDescriptor().getEnumTypes().get(1); + } + + private static final Substate[] VALUES = values(); + + public static Substate valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Substate(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.ClusterStatus.Substate) + } + + public static final int STATE_FIELD_NUMBER = 1; + private int state_; + /** + *
+   * Output only. The cluster's state.
+   * 
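+   *
+   * A short usage sketch (assumption: `status` is a ClusterStatus returned
+   * by the service; this example is illustrative, not generated):
+   *
+   *   ClusterStatus.State state = status.getState();
+   *   boolean ready = state == ClusterStatus.State.RUNNING;
+   *   if (state == ClusterStatus.State.UNRECOGNIZED) {
+   *     int raw = status.getStateValue();  // raw wire value is still available
+   *   }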
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+   */
+  public int getStateValue() {
+    return state_;
+  }
+  /**
+   * <pre>
+   * Output only. The cluster's state.
+   * 
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+   */
+  public com.google.cloud.dataproc.v1beta2.ClusterStatus.State getState() {
+    @SuppressWarnings("deprecation")
+    com.google.cloud.dataproc.v1beta2.ClusterStatus.State result = com.google.cloud.dataproc.v1beta2.ClusterStatus.State.valueOf(state_);
+    return result == null ? com.google.cloud.dataproc.v1beta2.ClusterStatus.State.UNRECOGNIZED : result;
+  }
+
+  public static final int DETAIL_FIELD_NUMBER = 2;
+  private volatile java.lang.Object detail_;
+  /**
+   * <pre>
+   * Output only. Optional details of the cluster's state.
+   * 
+   * </pre>
+   *
+   * <code>string detail = 2;</code>
+   */
+  public java.lang.String getDetail() {
+    java.lang.Object ref = detail_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      detail_ = s;
+      return s;
+    }
+  }
+  /**
+   * <pre>
+   * Output only. Optional details of the cluster's state.
+   * 
+   * </pre>
+   *
+   * <code>string detail = 2;</code>
+   */
+  public com.google.protobuf.ByteString
+      getDetailBytes() {
+    java.lang.Object ref = detail_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      detail_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int STATE_START_TIME_FIELD_NUMBER = 3;
+  private com.google.protobuf.Timestamp stateStartTime_;
+  /**
+   * <pre>
+   * Output only. Time when this state was entered.
+   * 
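+   *
+   * A sketch of converting this field to java.time (illustrative only;
+   * `status` is an assumed ClusterStatus instance):
+   *
+   *   if (status.hasStateStartTime()) {
+   *     com.google.protobuf.Timestamp ts = status.getStateStartTime();
+   *     java.time.Instant entered =
+   *         java.time.Instant.ofEpochSecond(ts.getSeconds(), ts.getNanos());
+   *   }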
+   * </pre>
+   *
+   * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+   */
+  public boolean hasStateStartTime() {
+    return stateStartTime_ != null;
+  }
+  /**
+   * <pre>
+   * Output only. Time when this state was entered.
+   * 
+   * </pre>
+   *
+   * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+   */
+  public com.google.protobuf.Timestamp getStateStartTime() {
+    return stateStartTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_;
+  }
+  /**
+   * <pre>
+   * Output only. Time when this state was entered.
+   * 
+   * </pre>
+   *
+   * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+   */
+  public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() {
+    return getStateStartTime();
+  }
+
+  public static final int SUBSTATE_FIELD_NUMBER = 4;
+  private int substate_;
+  /**
+   * <pre>
+   * Output only. Additional state information that includes
+   * status reported by the agent.
+   * 
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4;</code>
+   */
+  public int getSubstateValue() {
+    return substate_;
+  }
+  /**
+   * <pre>
+   * Output only. Additional state information that includes
+   * status reported by the agent.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4; + */ + public com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate getSubstate() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate result = com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate.valueOf(substate_); + return result == null ? com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (state_ != com.google.cloud.dataproc.v1beta2.ClusterStatus.State.UNKNOWN.getNumber()) { + output.writeEnum(1, state_); + } + if (!getDetailBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, detail_); + } + if (stateStartTime_ != null) { + output.writeMessage(3, getStateStartTime()); + } + if (substate_ != com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate.UNSPECIFIED.getNumber()) { + output.writeEnum(4, substate_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (state_ != com.google.cloud.dataproc.v1beta2.ClusterStatus.State.UNKNOWN.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_); + } + if (!getDetailBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, detail_); + } + if (stateStartTime_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getStateStartTime()); + } + if (substate_ != com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate.UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, substate_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ClusterStatus)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ClusterStatus other = (com.google.cloud.dataproc.v1beta2.ClusterStatus) obj; + + boolean result = true; + result = result && state_ == other.state_; + result = result && getDetail() + .equals(other.getDetail()); + result = result && (hasStateStartTime() == other.hasStateStartTime()); + if (hasStateStartTime()) { + result = result && getStateStartTime() + .equals(other.getStateStartTime()); + } + result = result && substate_ == other.substate_; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + DETAIL_FIELD_NUMBER; + hash = (53 * hash) + getDetail().hashCode(); + if (hasStateStartTime()) { + hash = (37 * hash) + STATE_START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStateStartTime().hashCode(); + } + hash = (37 * hash) + SUBSTATE_FIELD_NUMBER; + hash = (53 * hash) + 
substate_; + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ClusterStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ClusterStatus prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); 
+ } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The status of a cluster and its instances.
+   * 
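+   *
+   * A minimal builder sketch with illustrative values; in practice the
+   * fields of this message are output-only and populated by the service:
+   *
+   *   ClusterStatus status = ClusterStatus.newBuilder()
+   *       .setState(ClusterStatus.State.RUNNING)
+   *       .setDetail("cluster is serving jobs")
+   *       .build();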
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ClusterStatus} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ClusterStatus) + com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ClusterStatus.class, com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ClusterStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + state_ = 0; + + detail_ = ""; + + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = null; + } else { + stateStartTime_ = null; + stateStartTimeBuilder_ = null; + } + substate_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterStatus getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterStatus build() { + com.google.cloud.dataproc.v1beta2.ClusterStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterStatus buildPartial() { + com.google.cloud.dataproc.v1beta2.ClusterStatus result = new com.google.cloud.dataproc.v1beta2.ClusterStatus(this); + result.state_ = state_; + result.detail_ = detail_; + if (stateStartTimeBuilder_ == null) { + result.stateStartTime_ = stateStartTime_; + } else { + result.stateStartTime_ = stateStartTimeBuilder_.build(); + } + result.substate_ = substate_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ClusterStatus) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterStatus)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ClusterStatus other) { + if (other == com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance()) return this; + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.getDetail().isEmpty()) { + detail_ = other.detail_; + onChanged(); + } + if (other.hasStateStartTime()) { + mergeStateStartTime(other.getStateStartTime()); + } + if (other.substate_ != 0) { + setSubstateValue(other.getSubstateValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ClusterStatus parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ClusterStatus) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int state_ = 0; + /** + *
+     * Output only. The cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+     */
+    public int getStateValue() {
+      return state_;
+    }
+    /**
+     * <pre>
+     * Output only. The cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+     */
+    public Builder setStateValue(int value) {
+      state_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. The cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus.State getState() {
+      @SuppressWarnings("deprecation")
+      com.google.cloud.dataproc.v1beta2.ClusterStatus.State result = com.google.cloud.dataproc.v1beta2.ClusterStatus.State.valueOf(state_);
+      return result == null ? com.google.cloud.dataproc.v1beta2.ClusterStatus.State.UNRECOGNIZED : result;
+    }
+    /**
+     * <pre>
+     * Output only. The cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+     */
+    public Builder setState(com.google.cloud.dataproc.v1beta2.ClusterStatus.State value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      state_ = value.getNumber();
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. The cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+     */
+    public Builder clearState() {
+
+      state_ = 0;
+      onChanged();
+      return this;
+    }
+
+    private java.lang.Object detail_ = "";
+    /**
+     * <pre>
+     * Output only. Optional details of the cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>string detail = 2;</code>
+     */
+    public java.lang.String getDetail() {
+      java.lang.Object ref = detail_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        detail_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     * <pre>
+     * Output only. Optional details of the cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>string detail = 2;</code>
+     */
+    public com.google.protobuf.ByteString
+        getDetailBytes() {
+      java.lang.Object ref = detail_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        detail_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     * <pre>
+     * Output only. Optional details of the cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>string detail = 2;</code>
+     */
+    public Builder setDetail(
+        java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      detail_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Optional details of the cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>string detail = 2;</code>
+     */
+    public Builder clearDetail() {
+
+      detail_ = getDefaultInstance().getDetail();
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Optional details of the cluster's state.
+     * 
+     * </pre>
+     *
+     * <code>string detail = 2;</code>
+     */
+    public Builder setDetailBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+
+      detail_ = value;
+      onChanged();
+      return this;
+    }
+
+    private com.google.protobuf.Timestamp stateStartTime_ = null;
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> stateStartTimeBuilder_;
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public boolean hasStateStartTime() {
+      return stateStartTimeBuilder_ != null || stateStartTime_ != null;
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public com.google.protobuf.Timestamp getStateStartTime() {
+      if (stateStartTimeBuilder_ == null) {
+        return stateStartTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_;
+      } else {
+        return stateStartTimeBuilder_.getMessage();
+      }
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public Builder setStateStartTime(com.google.protobuf.Timestamp value) {
+      if (stateStartTimeBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        stateStartTime_ = value;
+        onChanged();
+      } else {
+        stateStartTimeBuilder_.setMessage(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public Builder setStateStartTime(
+        com.google.protobuf.Timestamp.Builder builderForValue) {
+      if (stateStartTimeBuilder_ == null) {
+        stateStartTime_ = builderForValue.build();
+        onChanged();
+      } else {
+        stateStartTimeBuilder_.setMessage(builderForValue.build());
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public Builder mergeStateStartTime(com.google.protobuf.Timestamp value) {
+      if (stateStartTimeBuilder_ == null) {
+        if (stateStartTime_ != null) {
+          stateStartTime_ =
+              com.google.protobuf.Timestamp.newBuilder(stateStartTime_).mergeFrom(value).buildPartial();
+        } else {
+          stateStartTime_ = value;
+        }
+        onChanged();
+      } else {
+        stateStartTimeBuilder_.mergeFrom(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public Builder clearStateStartTime() {
+      if (stateStartTimeBuilder_ == null) {
+        stateStartTime_ = null;
+        onChanged();
+      } else {
+        stateStartTime_ = null;
+        stateStartTimeBuilder_ = null;
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public com.google.protobuf.Timestamp.Builder getStateStartTimeBuilder() {
+
+      onChanged();
+      return getStateStartTimeFieldBuilder().getBuilder();
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() {
+      if (stateStartTimeBuilder_ != null) {
+        return stateStartTimeBuilder_.getMessageOrBuilder();
+      } else {
+        return stateStartTime_ == null ?
+            com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_;
+      }
+    }
+    /**
+     * <pre>
+     * Output only. Time when this state was entered.
+     * 
+     * </pre>
+     *
+     * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>
+        getStateStartTimeFieldBuilder() {
+      if (stateStartTimeBuilder_ == null) {
+        stateStartTimeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+            com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>(
+                getStateStartTime(),
+                getParentForChildren(),
+                isClean());
+        stateStartTime_ = null;
+      }
+      return stateStartTimeBuilder_;
+    }
+
+    private int substate_ = 0;
+    /**
+     * <pre>
+     * Output only. Additional state information that includes
+     * status reported by the agent.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4;</code>
+     */
+    public int getSubstateValue() {
+      return substate_;
+    }
+    /**
+     * <pre>
+     * Output only. Additional state information that includes
+     * status reported by the agent.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4;</code>
+     */
+    public Builder setSubstateValue(int value) {
+      substate_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Additional state information that includes
+     * status reported by the agent.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4;</code>
+     */
+    public com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate getSubstate() {
+      @SuppressWarnings("deprecation")
+      com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate result = com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate.valueOf(substate_);
+      return result == null ? com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate.UNRECOGNIZED : result;
+    }
+    /**
+     * <pre>
+     * Output only. Additional state information that includes
+     * status reported by the agent.
+     * 
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4;</code>
+     */
+    public Builder setSubstate(com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+
+      substate_ = value.getNumber();
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Output only. Additional state information that includes
+     * status reported by the agent.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4; + */ + public Builder clearSubstate() { + + substate_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ClusterStatus) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterStatus) + private static final com.google.cloud.dataproc.v1beta2.ClusterStatus DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ClusterStatus(); + } + + public static com.google.cloud.dataproc.v1beta2.ClusterStatus getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ClusterStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClusterStatus(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ClusterStatus getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatusOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatusOrBuilder.java new file mode 100644 index 000000000000..efb1c9c6a827 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatusOrBuilder.java @@ -0,0 +1,88 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ClusterStatusOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ClusterStatus) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The cluster's state.
+   * 
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+   */
+  int getStateValue();
+  /**
+   * <pre>
+   * Output only. The cluster's state.
+   * 
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.State state = 1;</code>
+   */
+  com.google.cloud.dataproc.v1beta2.ClusterStatus.State getState();
+
+  /**
+   * <pre>
+   * Output only. Optional details of the cluster's state.
+   * 
+   * </pre>
+   *
+   * <code>string detail = 2;</code>
+   */
+  java.lang.String getDetail();
+  /**
+   * <pre>
+   * Output only. Optional details of the cluster's state.
+   * 
+   * </pre>
+   *
+   * <code>string detail = 2;</code>
+   */
+  com.google.protobuf.ByteString
+      getDetailBytes();
+
+  /**
+   * <pre>
+   * Output only. Time when this state was entered.
+   * 
+   * </pre>
+   *
+   * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+   */
+  boolean hasStateStartTime();
+  /**
+   * <pre>
+   * Output only. Time when this state was entered.
+   * 
+   * </pre>
+   *
+   * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+   */
+  com.google.protobuf.Timestamp getStateStartTime();
+  /**
+   * <pre>
+   * Output only. Time when this state was entered.
+   * 
+   * </pre>
+   *
+   * <code>.google.protobuf.Timestamp state_start_time = 3;</code>
+   */
+  com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder();
+
+  /**
+   * <pre>
+   * Output only. Additional state information that includes
+   * status reported by the agent.
+   * 
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4;</code>
+   */
+  int getSubstateValue();
+  /**
+   * <pre>
+   * Output only. Additional state information that includes
+   * status reported by the agent.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterStatus.Substate substate = 4; + */ + com.google.cloud.dataproc.v1beta2.ClusterStatus.Substate getSubstate(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java new file mode 100644 index 000000000000..31d41dd5a1e8 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java @@ -0,0 +1,476 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public final class ClustersProto { + private ClustersProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_Cluster_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_Cluster_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_Cluster_LabelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_Cluster_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_MetadataEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_MetadataEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_DiskConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_DiskConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_PropertiesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_HdfsMetricsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_HdfsMetricsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_YarnMetricsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_YarnMetricsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n,google/cloud/dataproc/v1beta2/clusters" + + ".proto\022\035google.cloud.dataproc.v1beta2\032\034g" + + "oogle/api/annotations.proto\032*google/clou" + + "d/dataproc/v1beta2/shared.proto\032#google/" + + "longrunning/operations.proto\032\036google/pro" + + "tobuf/duration.proto\032 google/protobuf/fi" + + "eld_mask.proto\032\037google/protobuf/timestam" + + "p.proto\"\276\003\n\007Cluster\022\022\n\nproject_id\030\001 \001(\t\022" + + "\024\n\014cluster_name\030\002 \001(\t\022<\n\006config\030\003 \001(\0132,." + + "google.cloud.dataproc.v1beta2.ClusterCon" + + "fig\022B\n\006labels\030\010 \003(\01322.google.cloud.datap" + + "roc.v1beta2.Cluster.LabelsEntry\022<\n\006statu" + + "s\030\004 \001(\0132,.google.cloud.dataproc.v1beta2." + + "ClusterStatus\022D\n\016status_history\030\007 \003(\0132,." + + "google.cloud.dataproc.v1beta2.ClusterSta" + + "tus\022\024\n\014cluster_uuid\030\006 \001(\t\022>\n\007metrics\030\t \001" + + "(\0132-.google.cloud.dataproc.v1beta2.Clust" + + "erMetrics\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"\311\004\n\rClusterConfig\022\025\n\rco" + + "nfig_bucket\030\001 \001(\t\022K\n\022gce_cluster_config\030" + + "\010 \001(\0132/.google.cloud.dataproc.v1beta2.Gc" + + "eClusterConfig\022I\n\rmaster_config\030\t \001(\01322." + + "google.cloud.dataproc.v1beta2.InstanceGr" + + "oupConfig\022I\n\rworker_config\030\n \001(\01322.googl" + + "e.cloud.dataproc.v1beta2.InstanceGroupCo" + + "nfig\022S\n\027secondary_worker_config\030\014 \001(\01322." 
+ + "google.cloud.dataproc.v1beta2.InstanceGr" + + "oupConfig\022F\n\017software_config\030\r \001(\0132-.goo" + + "gle.cloud.dataproc.v1beta2.SoftwareConfi" + + "g\022H\n\020lifecycle_config\030\016 \001(\0132..google.clo" + + "ud.dataproc.v1beta2.LifecycleConfig\022W\n\026i" + + "nitialization_actions\030\013 \003(\01327.google.clo" + + "ud.dataproc.v1beta2.NodeInitializationAc" + + "tion\"\264\002\n\020GceClusterConfig\022\020\n\010zone_uri\030\001 " + + "\001(\t\022\023\n\013network_uri\030\002 \001(\t\022\026\n\016subnetwork_u" + + "ri\030\006 \001(\t\022\030\n\020internal_ip_only\030\007 \001(\010\022\027\n\017se" + + "rvice_account\030\010 \001(\t\022\036\n\026service_account_s" + + "copes\030\003 \003(\t\022\014\n\004tags\030\004 \003(\t\022O\n\010metadata\030\005 " + + "\003(\0132=.google.cloud.dataproc.v1beta2.GceC" + + "lusterConfig.MetadataEntry\032/\n\rMetadataEn" + + "try\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\374\002\n\023" + + "InstanceGroupConfig\022\025\n\rnum_instances\030\001 \001" + + "(\005\022\026\n\016instance_names\030\002 \003(\t\022\021\n\timage_uri\030" + + "\003 \001(\t\022\030\n\020machine_type_uri\030\004 \001(\t\022>\n\013disk_" + + "config\030\005 \001(\0132).google.cloud.dataproc.v1b" + + "eta2.DiskConfig\022\026\n\016is_preemptible\030\006 \001(\010\022" + + "O\n\024managed_group_config\030\007 \001(\01321.google.c" + + "loud.dataproc.v1beta2.ManagedGroupConfig" + + "\022F\n\014accelerators\030\010 \003(\01320.google.cloud.da" + + "taproc.v1beta2.AcceleratorConfig\022\030\n\020min_" + + "cpu_platform\030\t \001(\t\"Y\n\022ManagedGroupConfig" + + "\022\036\n\026instance_template_name\030\001 \001(\t\022#\n\033inst" + + "ance_group_manager_name\030\002 \001(\t\"L\n\021Acceler" + + "atorConfig\022\034\n\024accelerator_type_uri\030\001 \001(\t" + + "\022\031\n\021accelerator_count\030\002 \001(\005\"W\n\nDiskConfi" + + "g\022\026\n\016boot_disk_type\030\003 \001(\t\022\031\n\021boot_disk_s" + + "ize_gb\030\001 \001(\005\022\026\n\016num_local_ssds\030\002 \001(\005\"\272\001\n" + + "\017LifecycleConfig\0222\n\017idle_delete_ttl\030\001 \001(" + + "\0132\031.google.protobuf.Duration\0226\n\020auto_del" + + "ete_time\030\002 \001(\0132\032.google.protobuf.Timesta" + + "mpH\000\0224\n\017auto_delete_ttl\030\003 \001(\0132\031.google.p" + + "rotobuf.DurationH\000B\005\n\003ttl\"i\n\030NodeInitial" + + "izationAction\022\027\n\017executable_file\030\001 \001(\t\0224" + + "\n\021execution_timeout\030\002 \001(\0132\031.google.proto" + + "buf.Duration\"\367\002\n\rClusterStatus\022A\n\005state\030" + + "\001 \001(\01622.google.cloud.dataproc.v1beta2.Cl" + + "usterStatus.State\022\016\n\006detail\030\002 \001(\t\0224\n\020sta" + + "te_start_time\030\003 \001(\0132\032.google.protobuf.Ti" + + "mestamp\022G\n\010substate\030\004 \001(\01625.google.cloud" + + ".dataproc.v1beta2.ClusterStatus.Substate" + + "\"V\n\005State\022\013\n\007UNKNOWN\020\000\022\014\n\010CREATING\020\001\022\013\n\007" + + "RUNNING\020\002\022\t\n\005ERROR\020\003\022\014\n\010DELETING\020\004\022\014\n\010UP" + + "DATING\020\005\"<\n\010Substate\022\017\n\013UNSPECIFIED\020\000\022\r\n" + + "\tUNHEALTHY\020\001\022\020\n\014STALE_STATUS\020\002\"\255\001\n\016Softw" + + "areConfig\022\025\n\rimage_version\030\001 \001(\t\022Q\n\nprop" + + "erties\030\002 \003(\0132=.google.cloud.dataproc.v1b" + + "eta2.SoftwareConfig.PropertiesEntry\0321\n\017P" + + "ropertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001" + + 
"(\t:\0028\001\"\244\002\n\016ClusterMetrics\022T\n\014hdfs_metric" + + "s\030\001 \003(\0132>.google.cloud.dataproc.v1beta2." + + "ClusterMetrics.HdfsMetricsEntry\022T\n\014yarn_" + + "metrics\030\002 \003(\0132>.google.cloud.dataproc.v1" + + "beta2.ClusterMetrics.YarnMetricsEntry\0322\n" + + "\020HdfsMetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030" + + "\002 \001(\003:\0028\001\0322\n\020YarnMetricsEntry\022\013\n\003key\030\001 \001" + + "(\t\022\r\n\005value\030\002 \001(\003:\0028\001\"\207\001\n\024CreateClusterR" + + "equest\022\022\n\nproject_id\030\001 \001(\t\022\016\n\006region\030\003 \001" + + "(\t\0227\n\007cluster\030\002 \001(\0132&.google.cloud.datap" + + "roc.v1beta2.Cluster\022\022\n\nrequest_id\030\004 \001(\t\"" + + "\220\002\n\024UpdateClusterRequest\022\022\n\nproject_id\030\001" + + " \001(\t\022\016\n\006region\030\005 \001(\t\022\024\n\014cluster_name\030\002 \001" + + "(\t\0227\n\007cluster\030\003 \001(\0132&.google.cloud.datap" + + "roc.v1beta2.Cluster\022@\n\035graceful_decommis" + + "sion_timeout\030\006 \001(\0132\031.google.protobuf.Dur" + + "ation\022/\n\013update_mask\030\004 \001(\0132\032.google.prot" + + "obuf.FieldMask\022\022\n\nrequest_id\030\007 \001(\t\"z\n\024De" + + "leteClusterRequest\022\022\n\nproject_id\030\001 \001(\t\022\016" + + "\n\006region\030\003 \001(\t\022\024\n\014cluster_name\030\002 \001(\t\022\024\n\014" + + "cluster_uuid\030\004 \001(\t\022\022\n\nrequest_id\030\005 \001(\t\"M" + + "\n\021GetClusterRequest\022\022\n\nproject_id\030\001 \001(\t\022" + + "\016\n\006region\030\003 \001(\t\022\024\n\014cluster_name\030\002 \001(\t\"p\n" + + "\023ListClustersRequest\022\022\n\nproject_id\030\001 \001(\t" + + "\022\016\n\006region\030\004 \001(\t\022\016\n\006filter\030\005 \001(\t\022\021\n\tpage" + + "_size\030\002 \001(\005\022\022\n\npage_token\030\003 \001(\t\"i\n\024ListC" + + "lustersResponse\0228\n\010clusters\030\001 \003(\0132&.goog" + + "le.cloud.dataproc.v1beta2.Cluster\022\027\n\017nex" + + "t_page_token\030\002 \001(\t\"R\n\026DiagnoseClusterReq" + + "uest\022\022\n\nproject_id\030\001 \001(\t\022\016\n\006region\030\003 \001(\t" + + "\022\024\n\014cluster_name\030\002 \001(\t\",\n\026DiagnoseCluste" + + "rResults\022\022\n\noutput_uri\030\001 \001(\t2\370\010\n\021Cluster" + + "Controller\022\256\001\n\rCreateCluster\0223.google.cl" + + "oud.dataproc.v1beta2.CreateClusterReques" + + "t\032\035.google.longrunning.Operation\"I\202\323\344\223\002C" + + "\"8/v1beta2/projects/{project_id}/regions" + + "/{region}/clusters:\007cluster\022\275\001\n\rUpdateCl" + + "uster\0223.google.cloud.dataproc.v1beta2.Up" + + "dateClusterRequest\032\035.google.longrunning." + + "Operation\"X\202\323\344\223\002R2G/v1beta2/projects/{pr" + + "oject_id}/regions/{region}/clusters/{clu" + + "ster_name}:\007cluster\022\264\001\n\rDeleteCluster\0223." 
+ + "google.cloud.dataproc.v1beta2.DeleteClus" + + "terRequest\032\035.google.longrunning.Operatio" + + "n\"O\202\323\344\223\002I*G/v1beta2/projects/{project_id" + + "}/regions/{region}/clusters/{cluster_nam" + + "e}\022\267\001\n\nGetCluster\0220.google.cloud.datapro" + + "c.v1beta2.GetClusterRequest\032&.google.clo" + + "ud.dataproc.v1beta2.Cluster\"O\202\323\344\223\002I\022G/v1" + + "beta2/projects/{project_id}/regions/{reg" + + "ion}/clusters/{cluster_name}\022\271\001\n\014ListClu" + + "sters\0222.google.cloud.dataproc.v1beta2.Li" + + "stClustersRequest\0323.google.cloud.datapro" + + "c.v1beta2.ListClustersResponse\"@\202\323\344\223\002:\0228" + + "/v1beta2/projects/{project_id}/regions/{" + + "region}/clusters\022\304\001\n\017DiagnoseCluster\0225.g" + + "oogle.cloud.dataproc.v1beta2.DiagnoseClu" + + "sterRequest\032\035.google.longrunning.Operati" + + "on\"[\202\323\344\223\002U\"P/v1beta2/projects/{project_i" + + "d}/regions/{region}/clusters/{cluster_na" + + "me}:diagnose:\001*B{\n!com.google.cloud.data" + + "proc.v1beta2B\rClustersProtoP\001ZEgoogle.go" + + "lang.org/genproto/googleapis/cloud/datap" + + "roc/v1beta2;dataprocb\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.cloud.dataproc.v1beta2.SharedProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }, assigner); + internal_static_google_cloud_dataproc_v1beta2_Cluster_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_Cluster_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_Cluster_descriptor, + new java.lang.String[] { "ProjectId", "ClusterName", "Config", "Labels", "Status", "StatusHistory", "ClusterUuid", "Metrics", }); + internal_static_google_cloud_dataproc_v1beta2_Cluster_LabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_Cluster_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_Cluster_LabelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_Cluster_LabelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterConfig_descriptor, + new java.lang.String[] { "ConfigBucket", "GceClusterConfig", "MasterConfig", "WorkerConfig", "SecondaryWorkerConfig", "SoftwareConfig", "LifecycleConfig", "InitializationActions", }); + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_descriptor = + 
getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_descriptor, + new java.lang.String[] { "ZoneUri", "NetworkUri", "SubnetworkUri", "InternalIpOnly", "ServiceAccount", "ServiceAccountScopes", "Tags", "Metadata", }); + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_MetadataEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_MetadataEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_MetadataEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_descriptor, + new java.lang.String[] { "NumInstances", "InstanceNames", "ImageUri", "MachineTypeUri", "DiskConfig", "IsPreemptible", "ManagedGroupConfig", "Accelerators", "MinCpuPlatform", }); + internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_descriptor, + new java.lang.String[] { "InstanceTemplateName", "InstanceGroupManagerName", }); + internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_descriptor, + new java.lang.String[] { "AcceleratorTypeUri", "AcceleratorCount", }); + internal_static_google_cloud_dataproc_v1beta2_DiskConfig_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_dataproc_v1beta2_DiskConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_DiskConfig_descriptor, + new java.lang.String[] { "BootDiskType", "BootDiskSizeGb", "NumLocalSsds", }); + internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_descriptor, + new java.lang.String[] { "IdleDeleteTtl", "AutoDeleteTime", "AutoDeleteTtl", "Ttl", }); + internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_descriptor, + new 
java.lang.String[] { "ExecutableFile", "ExecutionTimeout", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterStatus_descriptor, + new java.lang.String[] { "State", "Detail", "StateStartTime", "Substate", }); + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_descriptor, + new java.lang.String[] { "ImageVersion", "Properties", }); + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_PropertiesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_PropertiesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor, + new java.lang.String[] { "HdfsMetrics", "YarnMetrics", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_HdfsMetricsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_HdfsMetricsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_HdfsMetricsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_YarnMetricsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_descriptor.getNestedTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_YarnMetricsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterMetrics_YarnMetricsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "Cluster", "RequestId", }); + internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "ClusterName", "Cluster", "GracefulDecommissionTimeout", "UpdateMask", "RequestId", }); + internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId", }); + internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "ClusterName", }); + internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "Filter", "PageSize", "PageToken", }); + internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_descriptor, + new java.lang.String[] { "Clusters", "NextPageToken", }); + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "ClusterName", }); + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_descriptor, + new java.lang.String[] { "OutputUri", }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.AnnotationsProto.http); + com.google.protobuf.Descriptors.FileDescriptor + .internalUpdateFileDescriptor(descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.cloud.dataproc.v1beta2.SharedProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git 
a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java new file mode 100644 index 000000000000..d4c15e7728fc --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java @@ -0,0 +1,1179 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to create a cluster.
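+ * A minimal construction sketch (editor's illustration; the project, region,
+ * and cluster name below are placeholder assumptions, not defaults):
+ *   CreateClusterRequest request = CreateClusterRequest.newBuilder()
+ *       .setProjectId("my-project")
+ *       .setRegion("us-central1")
+ *       .setCluster(Cluster.newBuilder().setClusterName("my-cluster").build())
+ *       .setRequestId(java.util.UUID.randomUUID().toString())
+ *       .build();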
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.CreateClusterRequest} + */ +public final class CreateClusterRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.CreateClusterRequest) + CreateClusterRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use CreateClusterRequest.newBuilder() to construct. + private CreateClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CreateClusterRequest() { + projectId_ = ""; + region_ = ""; + requestId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CreateClusterRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.Cluster.Builder subBuilder = null; + if (cluster_ != null) { + subBuilder = cluster_.toBuilder(); + } + cluster_ = input.readMessage(com.google.cloud.dataproc.v1beta2.Cluster.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(cluster_); + cluster_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + + requestId_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.CreateClusterRequest.class, com.google.cloud.dataproc.v1beta2.CreateClusterRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 3; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_FIELD_NUMBER = 2; + private com.google.cloud.dataproc.v1beta2.Cluster cluster_; + /** + *
+   * Required. The cluster to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public boolean hasCluster() { + return cluster_ != null; + } + /** + *
+   * Required. The cluster to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public com.google.cloud.dataproc.v1beta2.Cluster getCluster() { + return cluster_ == null ? com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance() : cluster_; + } + /** + *
+   * Required. The cluster to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder() { + return getCluster(); + } + + public static final int REQUEST_ID_FIELD_NUMBER = 4; + private volatile java.lang.Object requestId_; + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (cluster_ != null) { + output.writeMessage(2, getCluster()); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_); + } + if (!getRequestIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, requestId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (cluster_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getCluster()); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_); + } + if (!getRequestIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, requestId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.CreateClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.CreateClusterRequest other = (com.google.cloud.dataproc.v1beta2.CreateClusterRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && (hasCluster() == other.hasCluster()); + if (hasCluster()) { + result = result && getCluster() + .equals(other.getCluster()); + } + result = result && getRequestId() + .equals(other.getRequestId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + if (hasCluster()) { + hash = (37 * hash) + CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + getCluster().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + 
java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.CreateClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to create a cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.CreateClusterRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.CreateClusterRequest) + com.google.cloud.dataproc.v1beta2.CreateClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.CreateClusterRequest.class, com.google.cloud.dataproc.v1beta2.CreateClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.CreateClusterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + if (clusterBuilder_ == null) { + cluster_ = null; + } else { + cluster_ = null; + clusterBuilder_ = null; + } + requestId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_CreateClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateClusterRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.CreateClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateClusterRequest build() { + com.google.cloud.dataproc.v1beta2.CreateClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateClusterRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.CreateClusterRequest result = new com.google.cloud.dataproc.v1beta2.CreateClusterRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + if (clusterBuilder_ == null) { + result.cluster_ = cluster_; + } else { + result.cluster_ = clusterBuilder_.build(); + } + result.requestId_ = requestId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + 
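+    // Editor's illustrative sketch (not generated code): a typical build /
+    // serialize / re-parse round trip. All literal values are placeholder
+    // assumptions.
+    //
+    //   CreateClusterRequest original = CreateClusterRequest.newBuilder()
+    //       .setProjectId("my-project")
+    //       .setRegion("us-central1")
+    //       .build();
+    //   byte[] wire = original.toByteArray();
+    //   CreateClusterRequest parsed = CreateClusterRequest.parseFrom(wire);
+    //   // equals() compares protobuf messages field-wise, so this holds:
+    //   assert original.equals(parsed);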
@java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.CreateClusterRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.CreateClusterRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.CreateClusterRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.CreateClusterRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (other.hasCluster()) { + mergeCluster(other.getCluster()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.CreateClusterRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.CreateClusterRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.Cluster cluster_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder> clusterBuilder_; + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public boolean hasCluster() { + return clusterBuilder_ != null || cluster_ != null; + } + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public com.google.cloud.dataproc.v1beta2.Cluster getCluster() { + if (clusterBuilder_ == null) { + return cluster_ == null ? com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance() : cluster_; + } else { + return clusterBuilder_.getMessage(); + } + } + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public Builder setCluster(com.google.cloud.dataproc.v1beta2.Cluster value) { + if (clusterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cluster_ = value; + onChanged(); + } else { + clusterBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public Builder setCluster( + com.google.cloud.dataproc.v1beta2.Cluster.Builder builderForValue) { + if (clusterBuilder_ == null) { + cluster_ = builderForValue.build(); + onChanged(); + } else { + clusterBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public Builder mergeCluster(com.google.cloud.dataproc.v1beta2.Cluster value) { + if (clusterBuilder_ == null) { + if (cluster_ != null) { + cluster_ = + com.google.cloud.dataproc.v1beta2.Cluster.newBuilder(cluster_).mergeFrom(value).buildPartial(); + } else { + cluster_ = value; + } + onChanged(); + } else { + clusterBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public Builder clearCluster() { + if (clusterBuilder_ == null) { + cluster_ = null; + onChanged(); + } else { + cluster_ = null; + clusterBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The cluster to create.
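+     * Illustrative use: the returned nested builder stays linked to this
+     * request builder, so e.g.
+     * builder.getClusterBuilder().setClusterName("my-cluster") (placeholder
+     * name) mutates the cluster that build() will emit.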
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public com.google.cloud.dataproc.v1beta2.Cluster.Builder getClusterBuilder() { + + onChanged(); + return getClusterFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder() { + if (clusterBuilder_ != null) { + return clusterBuilder_.getMessageOrBuilder(); + } else { + return cluster_ == null ? + com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance() : cluster_; + } + } + /** + *
+     * Required. The cluster to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder> + getClusterFieldBuilder() { + if (clusterBuilder_ == null) { + clusterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder>( + getCluster(), + getParentForChildren(), + isClean()); + cluster_ = null; + } + return clusterBuilder_; + } + + private java.lang.Object requestId_ = ""; + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public Builder setRequestId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + requestId_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public Builder clearRequestId() { + + requestId_ = getDefaultInstance().getRequestId(); + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public Builder setRequestIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + requestId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.CreateClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.CreateClusterRequest) + private static final com.google.cloud.dataproc.v1beta2.CreateClusterRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.CreateClusterRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.CreateClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CreateClusterRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java new file mode 100644 index 000000000000..99b6dfc91e70 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java @@ -0,0 +1,106 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface CreateClusterRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.CreateClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The cluster to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + boolean hasCluster(); + /** + *
+   * Required. The cluster to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + com.google.cloud.dataproc.v1beta2.Cluster getCluster(); + /** + *
+   * Required. The cluster to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 2; + */ + com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder(); + + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4; + */ + java.lang.String getRequestId(); + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4; + */ + com.google.protobuf.ByteString + getRequestIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequest.java new file mode 100644 index 000000000000..5f5a71840c5d --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequest.java @@ -0,0 +1,820 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to create a workflow template.
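+ * A minimal construction sketch (editor's illustration; the parent string is a
+ * placeholder assumption, and a real call would populate the template):
+ *   CreateWorkflowTemplateRequest.newBuilder()
+ *       .setParent("projects/my-project/regions/us-central1")
+ *       .setTemplate(WorkflowTemplate.getDefaultInstance())
+ *       .build();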
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest} + */ +public final class CreateWorkflowTemplateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) + CreateWorkflowTemplateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use CreateWorkflowTemplateRequest.newBuilder() to construct. + private CreateWorkflowTemplateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CreateWorkflowTemplateRequest() { + parent_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CreateWorkflowTemplateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder subBuilder = null; + if (template_ != null) { + subBuilder = template_.toBuilder(); + } + template_ = input.readMessage(com.google.cloud.dataproc.v1beta2.WorkflowTemplate.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(template_); + template_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
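+   * For example: projects/my-project/regions/us-central1 (illustrative ids).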
+   * 
+ * + * string parent = 1; + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + public com.google.protobuf.ByteString + getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TEMPLATE_FIELD_NUMBER = 2; + private com.google.cloud.dataproc.v1beta2.WorkflowTemplate template_; + /** + *
+   * Required. The Dataproc workflow template to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public boolean hasTemplate() { + return template_ != null; + } + /** + *
+   * Required. The Dataproc workflow template to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate() { + return template_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } + /** + *
+   * Required. The Dataproc workflow template to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder() { + return getTemplate(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (template_ != null) { + output.writeMessage(2, getTemplate()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (template_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getTemplate()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest other = (com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) obj; + + boolean result = true; + result = result && getParent() + .equals(other.getParent()); + result = result && (hasTemplate() == other.hasTemplate()); + if (hasTemplate()) { + result = result && getTemplate() + .equals(other.getTemplate()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasTemplate()) { + hash = (37 * hash) + TEMPLATE_FIELD_NUMBER; + hash = (53 * hash) + getTemplate().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to create a workflow template.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + if (templateBuilder_ == null) { + template_ = null; + } else { + template_ = null; + templateBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest build() { + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest result = new com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest(this); + result.parent_ = parent_; + if (templateBuilder_ == null) { + result.template_ = template_; + } else { + result.template_ = templateBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor 
oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.hasTemplate()) { + mergeTemplate(other.getTemplate()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
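+     *
+     * For illustration, a well-formed value (the project and region IDs here
+     * are placeholders, not taken from this patch):
+     *
+     *   String parent = "projects/my-project/regions/us-central1";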
+     * 
+ * + * string parent = 1; + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public com.google.protobuf.ByteString + getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder setParent( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder setParentBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.WorkflowTemplate template_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> templateBuilder_; + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
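+     *
+     * A minimal assembly sketch (placeholder parent; a real call would pass a
+     * fully populated template rather than the default instance):
+     *
+     *   CreateWorkflowTemplateRequest request =
+     *       CreateWorkflowTemplateRequest.newBuilder()
+     *           .setParent("projects/my-project/regions/us-central1")
+     *           .setTemplate(WorkflowTemplate.getDefaultInstance())
+     *           .build();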
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public boolean hasTemplate() { + return templateBuilder_ != null || template_ != null; + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate() { + if (templateBuilder_ == null) { + return template_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } else { + return templateBuilder_.getMessage(); + } + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder setTemplate(com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + template_ = value; + onChanged(); + } else { + templateBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder setTemplate( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder builderForValue) { + if (templateBuilder_ == null) { + template_ = builderForValue.build(); + onChanged(); + } else { + templateBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder mergeTemplate(com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templateBuilder_ == null) { + if (template_ != null) { + template_ = + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.newBuilder(template_).mergeFrom(value).buildPartial(); + } else { + template_ = value; + } + onChanged(); + } else { + templateBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder clearTemplate() { + if (templateBuilder_ == null) { + template_ = null; + onChanged(); + } else { + template_ = null; + templateBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder getTemplateBuilder() { + + onChanged(); + return getTemplateFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder() { + if (templateBuilder_ != null) { + return templateBuilder_.getMessageOrBuilder(); + } else { + return template_ == null ? + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } + } + /** + *
+     * Required. The Dataproc workflow template to create.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> + getTemplateFieldBuilder() { + if (templateBuilder_ == null) { + templateBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder>( + getTemplate(), + getParentForChildren(), + isClean()); + template_ = null; + } + return templateBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) + private static final com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateWorkflowTemplateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CreateWorkflowTemplateRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequestOrBuilder.java new file mode 100644 index 000000000000..e24c8d224f0a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequestOrBuilder.java @@ -0,0 +1,56 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface CreateWorkflowTemplateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + java.lang.String getParent(); + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + com.google.protobuf.ByteString + getParentBytes(); + + /** + *
+   * Required. The Dataproc workflow template to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + boolean hasTemplate(); + /** + *
+   * Required. The Dataproc workflow template to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate(); + /** + *
+   * Required. The Dataproc workflow template to create.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java new file mode 100644 index 000000000000..f674bf8f654c --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java @@ -0,0 +1,1267 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to delete a cluster.
+ * 
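+ *
+ * Illustrative usage sketch (the project, region, and cluster names below are
+ * placeholders):
+ *
+ *   DeleteClusterRequest request = DeleteClusterRequest.newBuilder()
+ *       .setProjectId("my-project")
+ *       .setRegion("us-central1")
+ *       .setClusterName("my-cluster")
+ *       .build();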
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DeleteClusterRequest} + */ +public final class DeleteClusterRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.DeleteClusterRequest) + DeleteClusterRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use DeleteClusterRequest.newBuilder() to construct. + private DeleteClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DeleteClusterRequest() { + projectId_ = ""; + region_ = ""; + clusterName_ = ""; + clusterUuid_ = ""; + requestId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DeleteClusterRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterUuid_ = s; + break; + } + case 42: { + java.lang.String s = input.readStringRequireUtf8(); + + requestId_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.class, com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 3; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object clusterName_; + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_UUID_FIELD_NUMBER = 4; + private volatile java.lang.Object clusterUuid_; + /** + *
+   * Optional. Specifying the `cluster_uuid` means the RPC should fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
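+   *
+   * Sketch of supplying the guard (placeholder UUID); with it set, the delete
+   * fails rather than removing a different cluster re-created under the same
+   * name:
+   *
+   *   DeleteClusterRequest guarded = DeleteClusterRequest.newBuilder()
+   *       .setProjectId("my-project")
+   *       .setRegion("us-central1")
+   *       .setClusterName("my-cluster")
+   *       .setClusterUuid("11111111-2222-3333-4444-555555555555")
+   *       .build();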
+ * + * string cluster_uuid = 4; + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } + } + /** + *
+   * Optional. Specifying the `cluster_uuid` means the RPC should fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4; + */ + public com.google.protobuf.ByteString + getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_ID_FIELD_NUMBER = 5; + private volatile java.lang.Object requestId_; + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterName_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_); + } + if (!getClusterUuidBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, clusterUuid_); + } + if (!getRequestIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, requestId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterName_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_); + } + if (!getClusterUuidBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, clusterUuid_); + } + if (!getRequestIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, requestId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.DeleteClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest other = (com.google.cloud.dataproc.v1beta2.DeleteClusterRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && getClusterUuid() + .equals(other.getClusterUuid()); + result = result && getRequestId() + .equals(other.getRequestId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + 
getClusterName().hashCode(); + hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; + hash = (53 * hash) + getClusterUuid().hashCode(); + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + 
@java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to delete a cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DeleteClusterRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.DeleteClusterRequest) + com.google.cloud.dataproc.v1beta2.DeleteClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.class, com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + clusterName_ = ""; + + clusterUuid_ = ""; + + requestId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DeleteClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteClusterRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteClusterRequest build() { + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteClusterRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest result = new com.google.cloud.dataproc.v1beta2.DeleteClusterRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.clusterName_ = clusterName_; + result.clusterUuid_ = clusterUuid_; + result.requestId_ = requestId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, 
java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.DeleteClusterRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.DeleteClusterRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.DeleteClusterRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (!other.getClusterUuid().isEmpty()) { + clusterUuid_ = other.clusterUuid_; + onChanged(); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.DeleteClusterRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.DeleteClusterRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterUuid_ = ""; + /** + *
+     * Optional. Specifying the `cluster_uuid` means the RPC should fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4; + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. Specifying the `cluster_uuid` means the RPC should fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4; + */ + public com.google.protobuf.ByteString + getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. Specifying the `cluster_uuid` means the RPC should fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4; + */ + public Builder setClusterUuid( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterUuid_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Specifying the `cluster_uuid` means the RPC should fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4; + */ + public Builder clearClusterUuid() { + + clusterUuid_ = getDefaultInstance().getClusterUuid(); + onChanged(); + return this; + } + /** + *
+     * Optional. Specifying the `cluster_uuid` means the RPC should fail
+     * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+     * 
+ * + * string cluster_uuid = 4; + */ + public Builder setClusterUuidBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterUuid_ = value; + onChanged(); + return this; + } + + private java.lang.Object requestId_ = ""; + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5; + */ + public Builder setRequestId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + requestId_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5; + */ + public Builder clearRequestId() { + + requestId_ = getDefaultInstance().getRequestId(); + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 5; + */ + public Builder setRequestIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + requestId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.DeleteClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DeleteClusterRequest) + private static final com.google.cloud.dataproc.v1beta2.DeleteClusterRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.DeleteClusterRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.DeleteClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DeleteClusterRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java new file mode 100644 index 000000000000..35921bb31e56 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java @@ -0,0 +1,119 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface DeleteClusterRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.DeleteClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + java.lang.String getClusterName(); + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Optional. Specifying the `cluster_uuid` means the RPC should fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4; + */ + java.lang.String getClusterUuid(); + /** + *
+   * Optional. Specifying the `cluster_uuid` means the RPC should fail
+   * (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+   * 
+ * + * string cluster_uuid = 4; + */ + com.google.protobuf.ByteString + getClusterUuidBytes(); + + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5; + */ + java.lang.String getRequestId(); + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 5; + */ + com.google.protobuf.ByteString + getRequestIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequest.java new file mode 100644 index 000000000000..d5093167a684 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequest.java @@ -0,0 +1,894 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to delete a job.
+ * 
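+ *
+ * Illustrative usage sketch (the IDs below are placeholders):
+ *
+ *   DeleteJobRequest request = DeleteJobRequest.newBuilder()
+ *       .setProjectId("my-project")
+ *       .setRegion("us-central1")
+ *       .setJobId("my-job")
+ *       .build();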
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DeleteJobRequest} + */ +public final class DeleteJobRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.DeleteJobRequest) + DeleteJobRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use DeleteJobRequest.newBuilder() to construct. + private DeleteJobRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DeleteJobRequest() { + projectId_ = ""; + region_ = ""; + jobId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DeleteJobRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + jobId_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DeleteJobRequest.class, com.google.cloud.dataproc.v1beta2.DeleteJobRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 3; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int JOB_ID_FIELD_NUMBER = 2; + private volatile java.lang.Object jobId_; + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } + } + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, jobId_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, jobId_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.DeleteJobRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.DeleteJobRequest other = (com.google.cloud.dataproc.v1beta2.DeleteJobRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getJobId() + .equals(other.getJobId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.DeleteJobRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to delete a job.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DeleteJobRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.DeleteJobRequest) + com.google.cloud.dataproc.v1beta2.DeleteJobRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DeleteJobRequest.class, com.google.cloud.dataproc.v1beta2.DeleteJobRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.DeleteJobRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + jobId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteJobRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.DeleteJobRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteJobRequest build() { + com.google.cloud.dataproc.v1beta2.DeleteJobRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteJobRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.DeleteJobRequest result = new com.google.cloud.dataproc.v1beta2.DeleteJobRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.jobId_ = jobId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + 
java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.DeleteJobRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.DeleteJobRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.DeleteJobRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.DeleteJobRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getJobId().isEmpty()) { + jobId_ = other.jobId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.DeleteJobRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.DeleteJobRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object jobId_ = ""; + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + jobId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder clearJobId() { + + jobId_ = getDefaultInstance().getJobId(); + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + jobId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.DeleteJobRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DeleteJobRequest) + private static final com.google.cloud.dataproc.v1beta2.DeleteJobRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.DeleteJobRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.DeleteJobRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteJobRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DeleteJobRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteJobRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequestOrBuilder.java new file mode 100644 index 000000000000..8c218a982412 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequestOrBuilder.java @@ -0,0 +1,65 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface DeleteJobRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.DeleteJobRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + java.lang.String getJobId(); + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + com.google.protobuf.ByteString + getJobIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequest.java new file mode 100644 index 000000000000..a82e768c1ed6 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequest.java @@ -0,0 +1,675 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to delete a workflow template.
+ * Workflows that have already started will continue to run.
+ * 
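+ *
+ * A minimal usage sketch (illustrative only; the template name and version
+ * below are placeholders, and setVersion may be omitted to delete the
+ * template unconditionally):
+ *
+ *   DeleteWorkflowTemplateRequest request = DeleteWorkflowTemplateRequest.newBuilder()
+ *       .setName("projects/my-project/regions/us-central1/workflowTemplates/my-template")
+ *       .setVersion(3)   // optional: delete only if this matches the server version
+ *       .build();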
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest} + */ +public final class DeleteWorkflowTemplateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) + DeleteWorkflowTemplateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use DeleteWorkflowTemplateRequest.newBuilder() to construct. + private DeleteWorkflowTemplateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DeleteWorkflowTemplateRequest() { + name_ = ""; + version_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DeleteWorkflowTemplateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: { + + version_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names. It has the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+   * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names. It has the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+   * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSION_FIELD_NUMBER = 2; + private int version_; + /** + *
+   * Optional. The version of the workflow template to delete. If specified,
+   * the template is deleted only if its current server version matches the
+   * specified version.
+   * 
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (version_ != 0) { + output.writeInt32(2, version_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (version_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, version_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest other = (com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) obj; + + boolean result = true; + result = result && getName() + .equals(other.getName()); + result = result && (getVersion() + == other.getVersion()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest 
parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to delete a workflow template.
+   * Workflows that have already started will continue to run.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + version_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest build() { + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest result = new com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest(this); + result.name_ = name_; + result.version_ = version_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object 
value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getVersion() != 0) { + setVersion(other.getVersion()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names. It has the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+     * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names. It has the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+     * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names. It has the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+     * 
+ * + * string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names. It has the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+     * 
+ * + * string name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names. It has the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+     * 
+ * + * string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int version_ ; + /** + *
+     * Optional. The version of the workflow template to delete. If specified,
+     * the template is deleted only if its current server version matches the
+     * specified version.
+     * 
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + /** + *
+     * Optional. The version of the workflow template to delete. If specified,
+     * the template is deleted only if its current server version matches the
+     * specified version.
+     * 
+ * + * int32 version = 2; + */ + public Builder setVersion(int value) { + + version_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The version of the workflow template to delete. If specified,
+     * the template is deleted only if its current server version matches the
+     * specified version.
+     * 
+ * + * int32 version = 2; + */ + public Builder clearVersion() { + + version_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) + private static final com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteWorkflowTemplateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DeleteWorkflowTemplateRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequestOrBuilder.java new file mode 100644 index 000000000000..ba386c704b9a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequestOrBuilder.java @@ -0,0 +1,42 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface DeleteWorkflowTemplateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names. It has the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+   * 
+ * + * string name = 1; + */ + java.lang.String getName(); + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names. It has the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+   * 
+ * + * string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
+   * Optional. The version of the workflow template to delete. If specified,
+   * the template is deleted only if its current server version matches the
+   * specified version.
+   * 
+ * + * int32 version = 2; + */ + int getVersion(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequest.java new file mode 100644 index 000000000000..10b18ef75b12 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequest.java @@ -0,0 +1,894 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to collect cluster diagnostic information.
+ * 
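+ *
+ * A minimal usage sketch (illustrative only; the identifiers below are
+ * placeholders, and the setters follow the standard generated-builder
+ * pattern):
+ *
+ *   DiagnoseClusterRequest request = DiagnoseClusterRequest.newBuilder()
+ *       .setProjectId("my-project")      // GCP project that owns the cluster
+ *       .setRegion("us-central1")        // Dataproc region handling the request
+ *       .setClusterName("my-cluster")    // cluster to collect diagnostics for
+ *       .build();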
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DiagnoseClusterRequest} + */ +public final class DiagnoseClusterRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) + DiagnoseClusterRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use DiagnoseClusterRequest.newBuilder() to construct. + private DiagnoseClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DiagnoseClusterRequest() { + projectId_ = ""; + region_ = ""; + clusterName_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DiagnoseClusterRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.class, com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 3; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object clusterName_; + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterName_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterName_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest other = (com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(
+      com.google.protobuf.ByteString data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(
+      com.google.protobuf.ByteString data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(byte[] data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(
+      byte[] data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(
+      java.io.InputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseDelimitedFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseDelimitedFrom(
+      java.io.InputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(
+      com.google.protobuf.CodedInputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parseFrom(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+
+  @java.lang.Override
+  public Builder newBuilderForType() { return newBuilder(); }
+  public static Builder newBuilder() {
+    return DEFAULT_INSTANCE.toBuilder();
+  }
+  public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest prototype) {
+    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+  }
+  @java.lang.Override
+  public Builder toBuilder() {
+    return this == DEFAULT_INSTANCE
+        ? new Builder() : new Builder().mergeFrom(this);
+  }
+
+  @java.lang.Override
+  protected Builder newBuilderForType(
+      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+    Builder builder = new Builder(parent);
+    return builder;
+  }
+  /**
+   * <pre>
+   * A request to collect cluster diagnostic information.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DiagnoseClusterRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.class, com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + clusterName_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest build() { + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest result = new com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.clusterName_ = clusterName_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, 
value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) + private static final com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DiagnoseClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DiagnoseClusterRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequestOrBuilder.java new file mode 100644 index 000000000000..996f0410a890 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequestOrBuilder.java @@ -0,0 +1,65 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface DiagnoseClusterRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + java.lang.String getClusterName(); + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResults.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResults.java new file mode 100644 index 000000000000..52075b29741e --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResults.java @@ -0,0 +1,591 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * The location of diagnostic output.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DiagnoseClusterResults} + */ +public final class DiagnoseClusterResults extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.DiagnoseClusterResults) + DiagnoseClusterResultsOrBuilder { +private static final long serialVersionUID = 0L; + // Use DiagnoseClusterResults.newBuilder() to construct. + private DiagnoseClusterResults(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DiagnoseClusterResults() { + outputUri_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DiagnoseClusterResults( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + outputUri_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults.class, com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults.Builder.class); + } + + public static final int OUTPUT_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object outputUri_; + /** + *
+   * Output only. The Cloud Storage URI of the diagnostic output.
+   * The output report is a plain text file with a summary of collected
+   * diagnostics.
+   * 
+ * + * string output_uri = 1; + */ + public java.lang.String getOutputUri() { + java.lang.Object ref = outputUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputUri_ = s; + return s; + } + } + /** + *
+   * Output only. The Cloud Storage URI of the diagnostic output.
+   * The output report is a plain text file with a summary of collected
+   * diagnostics.
+   * 
+ * + * string output_uri = 1; + */ + public com.google.protobuf.ByteString + getOutputUriBytes() { + java.lang.Object ref = outputUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + outputUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getOutputUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, outputUri_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getOutputUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, outputUri_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults other = (com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults) obj; + + boolean result = true; + result = result && getOutputUri() + .equals(other.getOutputUri()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OUTPUT_URI_FIELD_NUMBER; + hash = (53 * hash) + getOutputUri().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The location of diagnostic output.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DiagnoseClusterResults} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.DiagnoseClusterResults) + com.google.cloud.dataproc.v1beta2.DiagnoseClusterResultsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults.class, com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + outputUri_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiagnoseClusterResults_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults build() { + com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults buildPartial() { + com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults result = new com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults(this); + result.outputUri_ = outputUri_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults other) { + if (other == com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults.getDefaultInstance()) return this; + if (!other.getOutputUri().isEmpty()) { + outputUri_ = other.outputUri_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object outputUri_ = ""; + /** + *
+     * Output only. The Cloud Storage URI of the diagnostic output.
+     * The output report is a plain text file with a summary of collected
+     * diagnostics.
+     * 
+ * + * string output_uri = 1; + */ + public java.lang.String getOutputUri() { + java.lang.Object ref = outputUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The Cloud Storage URI of the diagnostic output.
+     * The output report is a plain text file with a summary of collected
+     * diagnostics.
+     * 
+ * + * string output_uri = 1; + */ + public com.google.protobuf.ByteString + getOutputUriBytes() { + java.lang.Object ref = outputUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + outputUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The Cloud Storage URI of the diagnostic output.
+     * The output report is a plain text file with a summary of collected
+     * diagnostics.
+     * 
+ * + * string output_uri = 1; + */ + public Builder setOutputUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + outputUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The Cloud Storage URI of the diagnostic output.
+     * The output report is a plain text file with a summary of collected
+     * diagnostics.
+     * 
+ * + * string output_uri = 1; + */ + public Builder clearOutputUri() { + + outputUri_ = getDefaultInstance().getOutputUri(); + onChanged(); + return this; + } + /** + *
+     * Output only. The Cloud Storage URI of the diagnostic output.
+     * The output report is a plain text file with a summary of collected
+     * diagnostics.
+     * 
+ * + * string output_uri = 1; + */ + public Builder setOutputUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + outputUri_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.DiagnoseClusterResults) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DiagnoseClusterResults) + private static final com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults(); + } + + public static com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DiagnoseClusterResults parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DiagnoseClusterResults(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResultsOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResultsOrBuilder.java new file mode 100644 index 000000000000..e69cd83f97d9 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResultsOrBuilder.java @@ -0,0 +1,31 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface DiagnoseClusterResultsOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.DiagnoseClusterResults) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The Cloud Storage URI of the diagnostic output.
+   * The output report is a plain text file with a summary of collected
+   * diagnostics.
+   * 
+ * + * string output_uri = 1; + */ + java.lang.String getOutputUri(); + /** + *
+   * Output only. The Cloud Storage URI of the diagnostic output.
+   * The output report is a plain text file with a summary of collected
+   * diagnostics.
+   * 
+ * + * string output_uri = 1; + */ + com.google.protobuf.ByteString + getOutputUriBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfig.java new file mode 100644 index 000000000000..6a870c77166b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfig.java @@ -0,0 +1,759 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Specifies the config of disk options for a group of VM instances.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DiskConfig} + */ +public final class DiskConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.DiskConfig) + DiskConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use DiskConfig.newBuilder() to construct. + private DiskConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DiskConfig() { + bootDiskType_ = ""; + bootDiskSizeGb_ = 0; + numLocalSsds_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DiskConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + bootDiskSizeGb_ = input.readInt32(); + break; + } + case 16: { + + numLocalSsds_ = input.readInt32(); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + bootDiskType_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiskConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiskConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DiskConfig.class, com.google.cloud.dataproc.v1beta2.DiskConfig.Builder.class); + } + + public static final int BOOT_DISK_TYPE_FIELD_NUMBER = 3; + private volatile java.lang.Object bootDiskType_; + /** + *
+   * Optional. Type of the boot disk (default is "pd-standard").
+   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * 
+ * + * string boot_disk_type = 3; + */ + public java.lang.String getBootDiskType() { + java.lang.Object ref = bootDiskType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bootDiskType_ = s; + return s; + } + } + /** + *
+   * Optional. Type of the boot disk (default is "pd-standard").
+   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * 
+ * + * string boot_disk_type = 3; + */ + public com.google.protobuf.ByteString + getBootDiskTypeBytes() { + java.lang.Object ref = bootDiskType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + bootDiskType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BOOT_DISK_SIZE_GB_FIELD_NUMBER = 1; + private int bootDiskSizeGb_; + /** + *
+   * Optional. Size in GB of the boot disk (default is 500GB).
+   * 
+ * + * int32 boot_disk_size_gb = 1; + */ + public int getBootDiskSizeGb() { + return bootDiskSizeGb_; + } + + public static final int NUM_LOCAL_SSDS_FIELD_NUMBER = 2; + private int numLocalSsds_; + /** + *
+   * Optional. Number of attached SSDs, from 0 to 4 (default is 0).
+   * If SSDs are not attached, the boot disk is used to store runtime logs and
+   * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
+   * If one or more SSDs are attached, this runtime bulk
+   * data is spread across them, and the boot disk contains only basic
+   * config and installed binaries.
+   * 
+ * + * int32 num_local_ssds = 2; + */ + public int getNumLocalSsds() { + return numLocalSsds_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (bootDiskSizeGb_ != 0) { + output.writeInt32(1, bootDiskSizeGb_); + } + if (numLocalSsds_ != 0) { + output.writeInt32(2, numLocalSsds_); + } + if (!getBootDiskTypeBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, bootDiskType_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (bootDiskSizeGb_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, bootDiskSizeGb_); + } + if (numLocalSsds_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, numLocalSsds_); + } + if (!getBootDiskTypeBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, bootDiskType_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.DiskConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.DiskConfig other = (com.google.cloud.dataproc.v1beta2.DiskConfig) obj; + + boolean result = true; + result = result && getBootDiskType() + .equals(other.getBootDiskType()); + result = result && (getBootDiskSizeGb() + == other.getBootDiskSizeGb()); + result = result && (getNumLocalSsds() + == other.getNumLocalSsds()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BOOT_DISK_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getBootDiskType().hashCode(); + hash = (37 * hash) + BOOT_DISK_SIZE_GB_FIELD_NUMBER; + hash = (53 * hash) + getBootDiskSizeGb(); + hash = (37 * hash) + NUM_LOCAL_SSDS_FIELD_NUMBER; + hash = (53 * hash) + getNumLocalSsds(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.DiskConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.DiskConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Specifies the config of disk options for a group of VM instances.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.DiskConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.DiskConfig) + com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiskConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiskConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.DiskConfig.class, com.google.cloud.dataproc.v1beta2.DiskConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.DiskConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + bootDiskType_ = ""; + + bootDiskSizeGb_ = 0; + + numLocalSsds_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_DiskConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiskConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.DiskConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiskConfig build() { + com.google.cloud.dataproc.v1beta2.DiskConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiskConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.DiskConfig result = new com.google.cloud.dataproc.v1beta2.DiskConfig(this); + result.bootDiskType_ = bootDiskType_; + result.bootDiskSizeGb_ = bootDiskSizeGb_; + result.numLocalSsds_ = numLocalSsds_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + 
return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.DiskConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.DiskConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.DiskConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.DiskConfig.getDefaultInstance()) return this; + if (!other.getBootDiskType().isEmpty()) { + bootDiskType_ = other.bootDiskType_; + onChanged(); + } + if (other.getBootDiskSizeGb() != 0) { + setBootDiskSizeGb(other.getBootDiskSizeGb()); + } + if (other.getNumLocalSsds() != 0) { + setNumLocalSsds(other.getNumLocalSsds()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.DiskConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.DiskConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object bootDiskType_ = ""; + /** + *
+     * Optional. Type of the boot disk (default is "pd-standard").
+     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * 
+ * + * string boot_disk_type = 3; + */ + public java.lang.String getBootDiskType() { + java.lang.Object ref = bootDiskType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + bootDiskType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. Type of the boot disk (default is "pd-standard").
+     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * 
+ * + * string boot_disk_type = 3; + */ + public com.google.protobuf.ByteString + getBootDiskTypeBytes() { + java.lang.Object ref = bootDiskType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + bootDiskType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. Type of the boot disk (default is "pd-standard").
+     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * 
+ * + * string boot_disk_type = 3; + */ + public Builder setBootDiskType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + bootDiskType_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Type of the boot disk (default is "pd-standard").
+     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * 
+ * + * string boot_disk_type = 3; + */ + public Builder clearBootDiskType() { + + bootDiskType_ = getDefaultInstance().getBootDiskType(); + onChanged(); + return this; + } + /** + *
+     * Optional. Type of the boot disk (default is "pd-standard").
+     * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+     * "pd-standard" (Persistent Disk Hard Disk Drive).
+     * 
+ * + * string boot_disk_type = 3; + */ + public Builder setBootDiskTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + bootDiskType_ = value; + onChanged(); + return this; + } + + private int bootDiskSizeGb_ ; + /** + *
+     * Optional. Size in GB of the boot disk (default is 500 GB).
+     * 
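+     * <p>Illustrative sketch only; the value below is an example, not a
+     * recommendation:
+     * <pre>{@code
+     * DiskConfig disk = DiskConfig.newBuilder()
+     *     .setBootDiskSizeGb(1000) // overrides the 500 GB default
+     *     .build();
+     * }</pre>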
+ * + * int32 boot_disk_size_gb = 1; + */ + public int getBootDiskSizeGb() { + return bootDiskSizeGb_; + } + /** + *
+     * Optional. Size in GB of the boot disk (default is 500 GB).
+     * 
+ * + * int32 boot_disk_size_gb = 1; + */ + public Builder setBootDiskSizeGb(int value) { + + bootDiskSizeGb_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Size in GB of the boot disk (default is 500 GB).
+     * 
+ * + * int32 boot_disk_size_gb = 1; + */ + public Builder clearBootDiskSizeGb() { + + bootDiskSizeGb_ = 0; + onChanged(); + return this; + } + + private int numLocalSsds_ ; + /** + *
+     * Optional. Number of attached SSDs, from 0 to 4 (default is 0).
+     * If SSDs are not attached, the boot disk is used to store runtime logs and
+     * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
+     * If one or more SSDs are attached, this runtime bulk
+     * data is spread across them, and the boot disk contains only basic
+     * config and installed binaries.
+     * 
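+     * <p>Illustrative sketch combining the DiskConfig fields documented in
+     * this class; the values are placeholders, not recommendations:
+     * <pre>{@code
+     * DiskConfig disk = DiskConfig.newBuilder()
+     *     .setBootDiskType("pd-standard")
+     *     .setBootDiskSizeGb(500)
+     *     .setNumLocalSsds(2) // HDFS data and runtime logs move to the SSDs
+     *     .build();
+     * }</pre>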
+ * + * int32 num_local_ssds = 2; + */ + public int getNumLocalSsds() { + return numLocalSsds_; + } + /** + *
+     * Optional. Number of attached SSDs, from 0 to 4 (default is 0).
+     * If SSDs are not attached, the boot disk is used to store runtime logs and
+     * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
+     * If one or more SSDs are attached, this runtime bulk
+     * data is spread across them, and the boot disk contains only basic
+     * config and installed binaries.
+     * 
+ * + * int32 num_local_ssds = 2; + */ + public Builder setNumLocalSsds(int value) { + + numLocalSsds_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Number of attached SSDs, from 0 to 4 (default is 0).
+     * If SSDs are not attached, the boot disk is used to store runtime logs and
+     * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
+     * If one or more SSDs are attached, this runtime bulk
+     * data is spread across them, and the boot disk contains only basic
+     * config and installed binaries.
+     * 
+ * + * int32 num_local_ssds = 2; + */ + public Builder clearNumLocalSsds() { + + numLocalSsds_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.DiskConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DiskConfig) + private static final com.google.cloud.dataproc.v1beta2.DiskConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.DiskConfig(); + } + + public static com.google.cloud.dataproc.v1beta2.DiskConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DiskConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DiskConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.DiskConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfigOrBuilder.java new file mode 100644 index 000000000000..8d69879aa07b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfigOrBuilder.java @@ -0,0 +1,54 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface DiskConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.DiskConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. Type of the boot disk (default is "pd-standard").
+   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * 
+ * + * string boot_disk_type = 3; + */ + java.lang.String getBootDiskType(); + /** + *
+   * Optional. Type of the boot disk (default is "pd-standard").
+   * Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
+   * "pd-standard" (Persistent Disk Hard Disk Drive).
+   * 
+ * + * string boot_disk_type = 3; + */ + com.google.protobuf.ByteString + getBootDiskTypeBytes(); + + /** + *
+   * Optional. Size in GB of the boot disk (default is 500 GB).
+   * 
+ * + * int32 boot_disk_size_gb = 1; + */ + int getBootDiskSizeGb(); + + /** + *
+   * Optional. Number of attached SSDs, from 0 to 4 (default is 0).
+   * If SSDs are not attached, the boot disk is used to store runtime logs and
+   * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
+   * If one or more SSDs are attached, this runtime bulk
+   * data is spread across them, and the boot disk contains only basic
+   * config and installed binaries.
+   * 
+ * + * int32 num_local_ssds = 2; + */ + int getNumLocalSsds(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java new file mode 100644 index 000000000000..f1e9aae3edc1 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java @@ -0,0 +1,2255 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Common config settings for resources of Compute Engine cluster
+ * instances, applicable to all instances in the cluster.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.GceClusterConfig} + */ +public final class GceClusterConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.GceClusterConfig) + GceClusterConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use GceClusterConfig.newBuilder() to construct. + private GceClusterConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GceClusterConfig() { + zoneUri_ = ""; + networkUri_ = ""; + subnetworkUri_ = ""; + internalIpOnly_ = false; + serviceAccount_ = ""; + serviceAccountScopes_ = com.google.protobuf.LazyStringArrayList.EMPTY; + tags_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GceClusterConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + zoneUri_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + networkUri_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + serviceAccountScopes_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000020; + } + serviceAccountScopes_.add(s); + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tags_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000040; + } + tags_.add(s); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + metadata_ = com.google.protobuf.MapField.newMapField( + MetadataDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000080; + } + com.google.protobuf.MapEntry + metadata__ = input.readMessage( + MetadataDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + metadata_.getMutableMap().put( + metadata__.getKey(), metadata__.getValue()); + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + + subnetworkUri_ = s; + break; + } + case 56: { + + internalIpOnly_ = input.readBool(); + break; + } + case 66: { + java.lang.String s = input.readStringRequireUtf8(); + + serviceAccount_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + serviceAccountScopes_ = serviceAccountScopes_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tags_ = 
tags_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 5: + return internalGetMetadata(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.GceClusterConfig.class, com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder.class); + } + + private int bitField0_; + public static final int ZONE_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object zoneUri_; + /** + *
+   * Optional. The zone where the Compute Engine cluster will be located.
+   * On a create request, it is required in the "global" region. If omitted
+   * in a non-global Cloud Dataproc region, the service will pick a zone in the
+   * corresponding Compute Engine region. On a get request, zone will always be
+   * present.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+   * * `projects/[project_id]/zones/[zone]`
+   * * `us-central1-f`
+   * 
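+   * <p>Illustrative sketch only; any of the three accepted forms can be
+   * passed, and the short name is used here:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .setZoneUri("us-central1-f") // a full URL or partial URI also works
+   *     .build();
+   * }</pre>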
+ * + * string zone_uri = 1; + */ + public java.lang.String getZoneUri() { + java.lang.Object ref = zoneUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zoneUri_ = s; + return s; + } + } + /** + *
+   * Optional. The zone where the Compute Engine cluster will be located.
+   * On a create request, it is required in the "global" region. If omitted
+   * in a non-global Cloud Dataproc region, the service will pick a zone in the
+   * corresponding Compute Engine region. On a get request, zone will always be
+   * present.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+   * * `projects/[project_id]/zones/[zone]`
+   * * `us-central1-f`
+   * 
+ * + * string zone_uri = 1; + */ + public com.google.protobuf.ByteString + getZoneUriBytes() { + java.lang.Object ref = zoneUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + zoneUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NETWORK_URI_FIELD_NUMBER = 2; + private volatile java.lang.Object networkUri_; + /** + *
+   * Optional. The Compute Engine network to be used for machine
+   * communications. Cannot be specified with subnetwork_uri. If neither
+   * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+   * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+   * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+   * * `projects/[project_id]/regions/global/default`
+   * * `default`
+   * 
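+   * <p>Illustrative sketch only; because network_uri and subnetwork_uri are
+   * mutually exclusive, only the network is set here:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .setNetworkUri("default") // do not also call setSubnetworkUri(...)
+   *     .build();
+   * }</pre>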
+ * + * string network_uri = 2; + */ + public java.lang.String getNetworkUri() { + java.lang.Object ref = networkUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + networkUri_ = s; + return s; + } + } + /** + *
+   * Optional. The Compute Engine network to be used for machine
+   * communications. Cannot be specified with subnetwork_uri. If neither
+   * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+   * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+   * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+   * * `projects/[project_id]/regions/global/default`
+   * * `default`
+   * 
+ * + * string network_uri = 2; + */ + public com.google.protobuf.ByteString + getNetworkUriBytes() { + java.lang.Object ref = networkUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + networkUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SUBNETWORK_URI_FIELD_NUMBER = 6; + private volatile java.lang.Object subnetworkUri_; + /** + *
+   * Optional. The Compute Engine subnetwork to be used for machine
+   * communications. Cannot be specified with network_uri.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+   * * `projects/[project_id]/regions/us-east1/sub0`
+   * * `sub0`
+   * 
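+   * <p>Illustrative sketch using the partial-URI form; `[project_id]` is a
+   * placeholder, as in the examples above:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .setSubnetworkUri("projects/[project_id]/regions/us-east1/sub0")
+   *     .build();
+   * }</pre>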
+ * + * string subnetwork_uri = 6; + */ + public java.lang.String getSubnetworkUri() { + java.lang.Object ref = subnetworkUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + subnetworkUri_ = s; + return s; + } + } + /** + *
+   * Optional. The Compute Engine subnetwork to be used for machine
+   * communications. Cannot be specified with network_uri.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+   * * `projects/[project_id]/regions/us-east1/sub0`
+   * * `sub0`
+   * 
+ * + * string subnetwork_uri = 6; + */ + public com.google.protobuf.ByteString + getSubnetworkUriBytes() { + java.lang.Object ref = subnetworkUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + subnetworkUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INTERNAL_IP_ONLY_FIELD_NUMBER = 7; + private boolean internalIpOnly_; + /** + *
+   * Optional. If true, all instances in the cluster will only have internal IP
+   * addresses. By default, clusters are not restricted to internal IP addresses,
+   * and will have ephemeral external IP addresses assigned to each instance.
+   * This `internal_ip_only` restriction can only be enabled for
+   * subnetwork-enabled networks, and all off-cluster dependencies must be configured to be
+   * accessible without external IP addresses.
+   * 
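+   * <p>Illustrative sketch only; since this restriction requires a
+   * subnetwork-enabled network, the two fields are set together:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .setSubnetworkUri("sub0") // placeholder subnetwork short name
+   *     .setInternalIpOnly(true)
+   *     .build();
+   * }</pre>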
+ * + * bool internal_ip_only = 7; + */ + public boolean getInternalIpOnly() { + return internalIpOnly_; + } + + public static final int SERVICE_ACCOUNT_FIELD_NUMBER = 8; + private volatile java.lang.Object serviceAccount_; + /** + *
+   * Optional. The service account of the instances. Defaults to the default
+   * Compute Engine service account. Custom service accounts need
+   * permissions equivalent to the following IAM roles:
+   * * roles/logging.logWriter
+   * * roles/storage.objectAdmin
+   * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+   * for more information).
+   * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+   * 
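+   * <p>Illustrative sketch, assuming the standard generated setServiceAccount
+   * setter; the account below is a placeholder in the documented format:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .setServiceAccount("my-account@my-project.iam.gserviceaccount.com")
+   *     .build();
+   * }</pre>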
+ * + * string service_account = 8; + */ + public java.lang.String getServiceAccount() { + java.lang.Object ref = serviceAccount_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serviceAccount_ = s; + return s; + } + } + /** + *
+   * Optional. The service account of the instances. Defaults to the default
+   * Compute Engine service account. Custom service accounts need
+   * permissions equivalent to the following IAM roles:
+   * * roles/logging.logWriter
+   * * roles/storage.objectAdmin
+   * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+   * for more information).
+   * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+   * 
+ * + * string service_account = 8; + */ + public com.google.protobuf.ByteString + getServiceAccountBytes() { + java.lang.Object ref = serviceAccount_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serviceAccount_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERVICE_ACCOUNT_SCOPES_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList serviceAccountScopes_; + /** + *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
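+   * <p>Illustrative sketch, assuming the standard generated
+   * addServiceAccountScopes accessor for this repeated field:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .addServiceAccountScopes("https://www.googleapis.com/auth/bigquery")
+   *     .build();
+   * }</pre>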
+ * + * repeated string service_account_scopes = 3; + */ + public com.google.protobuf.ProtocolStringList + getServiceAccountScopesList() { + return serviceAccountScopes_; + } + /** + *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
+ * + * repeated string service_account_scopes = 3; + */ + public int getServiceAccountScopesCount() { + return serviceAccountScopes_.size(); + } + /** + *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
+ * + * repeated string service_account_scopes = 3; + */ + public java.lang.String getServiceAccountScopes(int index) { + return serviceAccountScopes_.get(index); + } + /** + *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
+ * + * repeated string service_account_scopes = 3; + */ + public com.google.protobuf.ByteString + getServiceAccountScopesBytes(int index) { + return serviceAccountScopes_.getByteString(index); + } + + public static final int TAGS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList tags_; + /** + *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
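+   * <p>Illustrative sketch, assuming the standard generated addTags accessor
+   * for this repeated field; the tag is a placeholder:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .addTags("allow-internal")
+   *     .build();
+   * }</pre>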
+ * + * repeated string tags = 4; + */ + public com.google.protobuf.ProtocolStringList + getTagsList() { + return tags_; + } + /** + *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
+ * + * repeated string tags = 4; + */ + public int getTagsCount() { + return tags_.size(); + } + /** + *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
+ * + * repeated string tags = 4; + */ + public java.lang.String getTags(int index) { + return tags_.get(index); + } + /** + *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
+ * + * repeated string tags = 4; + */ + public com.google.protobuf.ByteString + getTagsBytes(int index) { + return tags_.getByteString(index); + } + + public static final int METADATA_FIELD_NUMBER = 5; + private static final class MetadataDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_MetadataEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> metadata_; + private com.google.protobuf.MapField + internalGetMetadata() { + if (metadata_ == null) { + return com.google.protobuf.MapField.emptyMapField( + MetadataDefaultEntryHolder.defaultEntry); + } + return metadata_; + } + + public int getMetadataCount() { + return internalGetMetadata().getMap().size(); + } + /** + *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
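+   * <p>Illustrative sketch, assuming the standard generated putMetadata
+   * accessor for this map field; key and value are placeholders:
+   * <pre>{@code
+   * GceClusterConfig config = GceClusterConfig.newBuilder()
+   *     .putMetadata("environment", "staging")
+   *     .build();
+   * }</pre>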
+ * + * map<string, string> metadata = 5; + */ + + public boolean containsMetadata( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetMetadata().getMap().containsKey(key); + } + /** + * Use {@link #getMetadataMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getMetadata() { + return getMetadataMap(); + } + /** + *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+ * + * map<string, string> metadata = 5; + */ + + public java.util.Map getMetadataMap() { + return internalGetMetadata().getMap(); + } + /** + *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+ * + * map<string, string> metadata = 5; + */ + + public java.lang.String getMetadataOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetMetadata().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+ * + * map<string, string> metadata = 5; + */ + + public java.lang.String getMetadataOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetMetadata().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getZoneUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, zoneUri_); + } + if (!getNetworkUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, networkUri_); + } + for (int i = 0; i < serviceAccountScopes_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, serviceAccountScopes_.getRaw(i)); + } + for (int i = 0; i < tags_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, tags_.getRaw(i)); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetMetadata(), + MetadataDefaultEntryHolder.defaultEntry, + 5); + if (!getSubnetworkUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, subnetworkUri_); + } + if (internalIpOnly_ != false) { + output.writeBool(7, internalIpOnly_); + } + if (!getServiceAccountBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 8, serviceAccount_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getZoneUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, zoneUri_); + } + if (!getNetworkUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, networkUri_); + } + { + int dataSize = 0; + for (int i = 0; i < serviceAccountScopes_.size(); i++) { + dataSize += computeStringSizeNoTag(serviceAccountScopes_.getRaw(i)); + } + size += dataSize; + size += 1 * getServiceAccountScopesList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < tags_.size(); i++) { + dataSize += computeStringSizeNoTag(tags_.getRaw(i)); + } + size += dataSize; + size += 1 * getTagsList().size(); + } + for (java.util.Map.Entry entry + : internalGetMetadata().getMap().entrySet()) { + com.google.protobuf.MapEntry + metadata__ = MetadataDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, metadata__); + } + if (!getSubnetworkUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, subnetworkUri_); + } + if (internalIpOnly_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(7, internalIpOnly_); + } + if (!getServiceAccountBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, serviceAccount_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.GceClusterConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.GceClusterConfig other = (com.google.cloud.dataproc.v1beta2.GceClusterConfig) obj; + + boolean result = true; + result = result && getZoneUri() + .equals(other.getZoneUri()); + result = result && getNetworkUri() + .equals(other.getNetworkUri()); + result = result && getSubnetworkUri() + .equals(other.getSubnetworkUri()); + result = result && (getInternalIpOnly() + == other.getInternalIpOnly()); + result = result && getServiceAccount() + .equals(other.getServiceAccount()); + result = result && getServiceAccountScopesList() + .equals(other.getServiceAccountScopesList()); + result = result && getTagsList() + .equals(other.getTagsList()); + result = result && internalGetMetadata().equals( + other.internalGetMetadata()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ZONE_URI_FIELD_NUMBER; + hash = (53 * hash) + getZoneUri().hashCode(); + hash = (37 * hash) + NETWORK_URI_FIELD_NUMBER; + hash = (53 * hash) + getNetworkUri().hashCode(); + hash = (37 * hash) + SUBNETWORK_URI_FIELD_NUMBER; + hash = (53 * hash) + getSubnetworkUri().hashCode(); + hash = (37 * hash) + INTERNAL_IP_ONLY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getInternalIpOnly()); + hash = (37 * hash) + SERVICE_ACCOUNT_FIELD_NUMBER; + hash = (53 * hash) + getServiceAccount().hashCode(); + if (getServiceAccountScopesCount() > 0) { + hash = (37 * hash) + SERVICE_ACCOUNT_SCOPES_FIELD_NUMBER; + hash = (53 * hash) + getServiceAccountScopesList().hashCode(); + } + if (getTagsCount() > 0) { + hash = (37 * hash) + TAGS_FIELD_NUMBER; + hash = (53 * hash) + getTagsList().hashCode(); + } + if (!internalGetMetadata().getMap().isEmpty()) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + internalGetMetadata().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GceClusterConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.GceClusterConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Common config settings for resources of Compute Engine cluster
+   * instances, applicable to all instances in the cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.GceClusterConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.GceClusterConfig) + com.google.cloud.dataproc.v1beta2.GceClusterConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 5: + return internalGetMetadata(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 5: + return internalGetMutableMetadata(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.GceClusterConfig.class, com.google.cloud.dataproc.v1beta2.GceClusterConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.GceClusterConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + zoneUri_ = ""; + + networkUri_ = ""; + + subnetworkUri_ = ""; + + internalIpOnly_ = false; + + serviceAccount_ = ""; + + serviceAccountScopes_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + tags_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); + internalGetMutableMetadata().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GceClusterConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GceClusterConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.GceClusterConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GceClusterConfig build() { + com.google.cloud.dataproc.v1beta2.GceClusterConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GceClusterConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.GceClusterConfig result = new com.google.cloud.dataproc.v1beta2.GceClusterConfig(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.zoneUri_ = zoneUri_; + result.networkUri_ = networkUri_; + result.subnetworkUri_ = subnetworkUri_; + 
result.internalIpOnly_ = internalIpOnly_; + result.serviceAccount_ = serviceAccount_; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + serviceAccountScopes_ = serviceAccountScopes_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.serviceAccountScopes_ = serviceAccountScopes_; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tags_ = tags_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tags_ = tags_; + result.metadata_ = internalGetMetadata(); + result.metadata_.makeImmutable(); + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.GceClusterConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.GceClusterConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.GceClusterConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.GceClusterConfig.getDefaultInstance()) return this; + if (!other.getZoneUri().isEmpty()) { + zoneUri_ = other.zoneUri_; + onChanged(); + } + if (!other.getNetworkUri().isEmpty()) { + networkUri_ = other.networkUri_; + onChanged(); + } + if (!other.getSubnetworkUri().isEmpty()) { + subnetworkUri_ = other.subnetworkUri_; + onChanged(); + } + if (other.getInternalIpOnly() != false) { + setInternalIpOnly(other.getInternalIpOnly()); + } + if (!other.getServiceAccount().isEmpty()) { + serviceAccount_ = other.serviceAccount_; + onChanged(); + } + if (!other.serviceAccountScopes_.isEmpty()) { + if (serviceAccountScopes_.isEmpty()) { + serviceAccountScopes_ = other.serviceAccountScopes_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureServiceAccountScopesIsMutable(); + serviceAccountScopes_.addAll(other.serviceAccountScopes_); + } + onChanged(); + } + if (!other.tags_.isEmpty()) { + if (tags_.isEmpty()) { + tags_ = other.tags_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTagsIsMutable(); + tags_.addAll(other.tags_); + } + onChanged(); + } + internalGetMutableMetadata().mergeFrom( + other.internalGetMetadata()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.GceClusterConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.GceClusterConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object zoneUri_ = ""; + /** + *
+     * Optional. The zone where the Compute Engine cluster will be located.
+     * On a create request, it is required in the "global" region. If omitted
+     * in a non-global Cloud Dataproc region, the service will pick a zone in the
+     * corresponding Compute Engine region. On a get request, zone will always be
+     * present.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+     * * `projects/[project_id]/zones/[zone]`
+     * * `us-central1-f`
+     * 
+ * + * string zone_uri = 1; + */ + public java.lang.String getZoneUri() { + java.lang.Object ref = zoneUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + zoneUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The zone where the Compute Engine cluster will be located.
+     * On a create request, it is required in the "global" region. If omitted
+     * in a non-global Cloud Dataproc region, the service will pick a zone in the
+     * corresponding Compute Engine region. On a get request, zone will always be
+     * present.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+     * * `projects/[project_id]/zones/[zone]`
+     * * `us-central1-f`
+     * 
+ * + * string zone_uri = 1; + */ + public com.google.protobuf.ByteString + getZoneUriBytes() { + java.lang.Object ref = zoneUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + zoneUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The zone where the Compute Engine cluster will be located.
+     * On a create request, it is required in the "global" region. If omitted
+     * in a non-global Cloud Dataproc region, the service will pick a zone in the
+     * corresponding Compute Engine region. On a get request, zone will always be
+     * present.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+     * * `projects/[project_id]/zones/[zone]`
+     * * `us-central1-f`
+     * 
+ * + * string zone_uri = 1; + */ + public Builder setZoneUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + zoneUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The zone where the Compute Engine cluster will be located.
+     * On a create request, it is required in the "global" region. If omitted
+     * in a non-global Cloud Dataproc region, the service will pick a zone in the
+     * corresponding Compute Engine region. On a get request, zone will always be
+     * present.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+     * * `projects/[project_id]/zones/[zone]`
+     * * `us-central1-f`
+     * 
+ * + * string zone_uri = 1; + */ + public Builder clearZoneUri() { + + zoneUri_ = getDefaultInstance().getZoneUri(); + onChanged(); + return this; + } + /** + *
+     * Optional. The zone where the Compute Engine cluster will be located.
+     * On a create request, it is required in the "global" region. If omitted
+     * in a non-global Cloud Dataproc region, the service will pick a zone in the
+     * corresponding Compute Engine region. On a get request, zone will always be
+     * present.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+     * * `projects/[project_id]/zones/[zone]`
+     * * `us-central1-f`
+     * 
+ * + * string zone_uri = 1; + */ + public Builder setZoneUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + zoneUri_ = value; + onChanged(); + return this; + } + + private java.lang.Object networkUri_ = ""; + /** + *
+     * Optional. The Compute Engine network to be used for machine
+     * communications. Cannot be specified with subnetwork_uri. If neither
+     * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+     * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+     * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+     * * `projects/[project_id]/regions/global/default`
+     * * `default`
+     * 
+ * + * string network_uri = 2; + */ + public java.lang.String getNetworkUri() { + java.lang.Object ref = networkUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + networkUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The Compute Engine network to be used for machine
+     * communications. Cannot be specified with subnetwork_uri. If neither
+     * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+     * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+     * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+     * * `projects/[project_id]/regions/global/default`
+     * * `default`
+     * 
+ * + * string network_uri = 2; + */ + public com.google.protobuf.ByteString + getNetworkUriBytes() { + java.lang.Object ref = networkUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + networkUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The Compute Engine network to be used for machine
+     * communications. Cannot be specified with subnetwork_uri. If neither
+     * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+     * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+     * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+     * * `projects/[project_id]/regions/global/default`
+     * * `default`
+     * 
+ * + * string network_uri = 2; + */ + public Builder setNetworkUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + networkUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The Compute Engine network to be used for machine
+     * communications. Cannot be specified with subnetwork_uri. If neither
+     * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+     * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+     * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+     * * `projects/[project_id]/regions/global/default`
+     * * `default`
+     * 
+ * + * string network_uri = 2; + */ + public Builder clearNetworkUri() { + + networkUri_ = getDefaultInstance().getNetworkUri(); + onChanged(); + return this; + } + /** + *
+     * Optional. The Compute Engine network to be used for machine
+     * communications. Cannot be specified with subnetwork_uri. If neither
+     * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+     * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+     * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+     * * `projects/[project_id]/regions/global/default`
+     * * `default`
+     * 
+ * + * string network_uri = 2; + */ + public Builder setNetworkUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + networkUri_ = value; + onChanged(); + return this; + } + + private java.lang.Object subnetworkUri_ = ""; + /** + *
+     * Optional. The Compute Engine subnetwork to be used for machine
+     * communications. Cannot be specified with network_uri.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+     * * `projects/[project_id]/regions/us-east1/sub0`
+     * * `sub0`
+     * 
+ * + * string subnetwork_uri = 6; + */ + public java.lang.String getSubnetworkUri() { + java.lang.Object ref = subnetworkUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + subnetworkUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The Compute Engine subnetwork to be used for machine
+     * communications. Cannot be specified with network_uri.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+     * * `projects/[project_id]/regions/us-east1/sub0`
+     * * `sub0`
+     * 
+ * + * string subnetwork_uri = 6; + */ + public com.google.protobuf.ByteString + getSubnetworkUriBytes() { + java.lang.Object ref = subnetworkUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + subnetworkUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The Compute Engine subnetwork to be used for machine
+     * communications. Cannot be specified with network_uri.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+     * * `projects/[project_id]/regions/us-east1/sub0`
+     * * `sub0`
+     * 
+ * + * string subnetwork_uri = 6; + */ + public Builder setSubnetworkUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + subnetworkUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The Compute Engine subnetwork to be used for machine
+     * communications. Cannot be specified with network_uri.
+     * A full URL, partial URI, or short name are valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+     * * `projects/[project_id]/regions/us-east1/sub0`
+     * * `sub0`
+     * 
+     *
+     * string subnetwork_uri = 6;
+     */
+    public Builder clearSubnetworkUri() {
+      
+      subnetworkUri_ = getDefaultInstance().getSubnetworkUri();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. The Compute Engine subnetwork to be used for machine
+     * communications. Cannot be specified with network_uri.
+     * A full URL, partial URI, or short name are valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+     * * `projects/[project_id]/regions/us-east1/sub0`
+     * * `sub0`
+     * 
+     *
+     * string subnetwork_uri = 6;
+     */
+    public Builder setSubnetworkUriBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  checkByteStringIsUtf8(value);
+      
+      subnetworkUri_ = value;
+      onChanged();
+      return this;
+    }
+
+    private boolean internalIpOnly_ ;
+    /**
+     *
+     * Optional. If true, all instances in the cluster will only have internal IP
+     * addresses. By default, clusters are not restricted to internal IP addresses,
+     * and will have ephemeral external IP addresses assigned to each instance.
+     * This `internal_ip_only` restriction can only be enabled for subnetwork
+     * enabled networks, and all off-cluster dependencies must be configured to be
+     * accessible without external IP addresses.
+     * 
+     *
+     * bool internal_ip_only = 7;
+     */
+    public boolean getInternalIpOnly() {
+      return internalIpOnly_;
+    }
+    /**
+     *
+     * Optional. If true, all instances in the cluster will only have internal IP
+     * addresses. By default, clusters are not restricted to internal IP addresses,
+     * and will have ephemeral external IP addresses assigned to each instance.
+     * This `internal_ip_only` restriction can only be enabled for subnetwork
+     * enabled networks, and all off-cluster dependencies must be configured to be
+     * accessible without external IP addresses.
+     * 
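+     * Illustrative sketch only, not emitted by protoc (it assumes the
+     * subnetwork named here is a placeholder and already subnetwork-enabled,
+     * per the restriction above):
+     *
+     *   GceClusterConfig config = GceClusterConfig.newBuilder()
+     *       .setSubnetworkUri("projects/[project_id]/regions/us-east1/sub0")
+     *       .setInternalIpOnly(true)
+     *       .build();
+     *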
+     *
+     * bool internal_ip_only = 7;
+     */
+    public Builder setInternalIpOnly(boolean value) {
+      
+      internalIpOnly_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. If true, all instances in the cluster will only have internal IP
+     * addresses. By default, clusters are not restricted to internal IP addresses,
+     * and will have ephemeral external IP addresses assigned to each instance.
+     * This `internal_ip_only` restriction can only be enabled for subnetwork
+     * enabled networks, and all off-cluster dependencies must be configured to be
+     * accessible without external IP addresses.
+     * 
+     *
+     * bool internal_ip_only = 7;
+     */
+    public Builder clearInternalIpOnly() {
+      
+      internalIpOnly_ = false;
+      onChanged();
+      return this;
+    }
+
+    private java.lang.Object serviceAccount_ = "";
+    /**
+     *
+     * Optional. The service account of the instances. Defaults to the default
+     * Compute Engine service account. Custom service accounts need
+     * permissions equivalent to the following IAM roles:
+     * * roles/logging.logWriter
+     * * roles/storage.objectAdmin
+     * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+     * for more information).
+     * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+     * 
+     *
+     * string service_account = 8;
+     */
+    public java.lang.String getServiceAccount() {
+      java.lang.Object ref = serviceAccount_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        serviceAccount_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     *
+     * Optional. The service account of the instances. Defaults to the default
+     * Compute Engine service account. Custom service accounts need
+     * permissions equivalent to the following IAM roles:
+     * * roles/logging.logWriter
+     * * roles/storage.objectAdmin
+     * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+     * for more information).
+     * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+     * 
+     *
+     * string service_account = 8;
+     */
+    public com.google.protobuf.ByteString
+        getServiceAccountBytes() {
+      java.lang.Object ref = serviceAccount_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        serviceAccount_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     *
+     * Optional. The service account of the instances. Defaults to the default
+     * Compute Engine service account. Custom service accounts need
+     * permissions equivalent to the following IAM roles:
+     * * roles/logging.logWriter
+     * * roles/storage.objectAdmin
+     * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+     * for more information).
+     * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+     * 
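+     * Illustrative sketch, not part of the generated file (the account and
+     * project IDs are placeholders, following the example format above):
+     *
+     *   GceClusterConfig config = GceClusterConfig.newBuilder()
+     *       .setServiceAccount("[account_id]@[project_id].iam.gserviceaccount.com")
+     *       .build();
+     *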
+     *
+     * string service_account = 8;
+     */
+    public Builder setServiceAccount(
+        java.lang.String value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  
+      serviceAccount_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. The service account of the instances. Defaults to the default
+     * Compute Engine service account. Custom service accounts need
+     * permissions equivalent to the following IAM roles:
+     * * roles/logging.logWriter
+     * * roles/storage.objectAdmin
+     * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+     * for more information).
+     * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+     * 
+     *
+     * string service_account = 8;
+     */
+    public Builder clearServiceAccount() {
+      
+      serviceAccount_ = getDefaultInstance().getServiceAccount();
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. The service account of the instances. Defaults to the default
+     * Compute Engine service account. Custom service accounts need
+     * permissions equivalent to the following IAM roles:
+     * * roles/logging.logWriter
+     * * roles/storage.objectAdmin
+     * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+     * for more information).
+     * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+     * 
+     *
+     * string service_account = 8;
+     */
+    public Builder setServiceAccountBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  checkByteStringIsUtf8(value);
+      
+      serviceAccount_ = value;
+      onChanged();
+      return this;
+    }
+
+    private com.google.protobuf.LazyStringList serviceAccountScopes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+    private void ensureServiceAccountScopesIsMutable() {
+      if (!((bitField0_ & 0x00000020) == 0x00000020)) {
+        serviceAccountScopes_ = new com.google.protobuf.LazyStringArrayList(serviceAccountScopes_);
+        bitField0_ |= 0x00000020;
+      }
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public com.google.protobuf.ProtocolStringList
+        getServiceAccountScopesList() {
+      return serviceAccountScopes_.getUnmodifiableView();
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public int getServiceAccountScopesCount() {
+      return serviceAccountScopes_.size();
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public java.lang.String getServiceAccountScopes(int index) {
+      return serviceAccountScopes_.get(index);
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public com.google.protobuf.ByteString
+        getServiceAccountScopesBytes(int index) {
+      return serviceAccountScopes_.getByteString(index);
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public Builder setServiceAccountScopes(
+        int index, java.lang.String value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureServiceAccountScopesIsMutable();
+      serviceAccountScopes_.set(index, value);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
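+     * Illustrative sketch, not part of the generated API (the scope URL is
+     * taken from the defaults listed above):
+     *
+     *   GceClusterConfig config = GceClusterConfig.newBuilder()
+     *       .addServiceAccountScopes("https://www.googleapis.com/auth/bigquery")
+     *       .build();
+     *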
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public Builder addServiceAccountScopes(
+        java.lang.String value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureServiceAccountScopesIsMutable();
+      serviceAccountScopes_.add(value);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public Builder addAllServiceAccountScopes(
+        java.lang.Iterable<java.lang.String> values) {
+      ensureServiceAccountScopesIsMutable();
+      com.google.protobuf.AbstractMessageLite.Builder.addAll(
+          values, serviceAccountScopes_);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public Builder clearServiceAccountScopes() {
+      serviceAccountScopes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      bitField0_ = (bitField0_ & ~0x00000020);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * Optional. The URIs of service account scopes to be included in
+     * Compute Engine instances. The following base set of scopes is always
+     * included:
+     * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+     * * https://www.googleapis.com/auth/devstorage.read_write
+     * * https://www.googleapis.com/auth/logging.write
+     * If no scopes are specified, the following defaults are also provided:
+     * * https://www.googleapis.com/auth/bigquery
+     * * https://www.googleapis.com/auth/bigtable.admin.table
+     * * https://www.googleapis.com/auth/bigtable.data
+     * * https://www.googleapis.com/auth/devstorage.full_control
+     * 
+     *
+     * repeated string service_account_scopes = 3;
+     */
+    public Builder addServiceAccountScopesBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  checkByteStringIsUtf8(value);
+      ensureServiceAccountScopesIsMutable();
+      serviceAccountScopes_.add(value);
+      onChanged();
+      return this;
+    }
+
+    private com.google.protobuf.LazyStringList tags_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+    private void ensureTagsIsMutable() {
+      if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+        tags_ = new com.google.protobuf.LazyStringArrayList(tags_);
+        bitField0_ |= 0x00000040;
+      }
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public com.google.protobuf.ProtocolStringList
+        getTagsList() {
+      return tags_.getUnmodifiableView();
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public int getTagsCount() {
+      return tags_.size();
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public java.lang.String getTags(int index) {
+      return tags_.get(index);
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public com.google.protobuf.ByteString
+        getTagsBytes(int index) {
+      return tags_.getByteString(index);
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public Builder setTags(
+        int index, java.lang.String value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureTagsIsMutable();
+      tags_.set(index, value);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
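+     * Illustrative sketch, not emitted by protoc (the tag values are
+     * placeholders):
+     *
+     *   GceClusterConfig config = GceClusterConfig.newBuilder()
+     *       .addTags("hadoop")
+     *       .addTags("dataproc")
+     *       .build();
+     *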
+     *
+     * repeated string tags = 4;
+     */
+    public Builder addTags(
+        java.lang.String value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  ensureTagsIsMutable();
+      tags_.add(value);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public Builder addAllTags(
+        java.lang.Iterable<java.lang.String> values) {
+      ensureTagsIsMutable();
+      com.google.protobuf.AbstractMessageLite.Builder.addAll(
+          values, tags_);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public Builder clearTags() {
+      tags_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      bitField0_ = (bitField0_ & ~0x00000040);
+      onChanged();
+      return this;
+    }
+    /**
+     *
+     * The Compute Engine tags to add to all instances (see
+     * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+     * 
+     *
+     * repeated string tags = 4;
+     */
+    public Builder addTagsBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+    throw new NullPointerException();
+  }
+  checkByteStringIsUtf8(value);
+      ensureTagsIsMutable();
+      tags_.add(value);
+      onChanged();
+      return this;
+    }
+
+    private com.google.protobuf.MapField<
+        java.lang.String, java.lang.String> metadata_;
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+    internalGetMetadata() {
+      if (metadata_ == null) {
+        return com.google.protobuf.MapField.emptyMapField(
+            MetadataDefaultEntryHolder.defaultEntry);
+      }
+      return metadata_;
+    }
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+    internalGetMutableMetadata() {
+      onChanged();;
+      if (metadata_ == null) {
+        metadata_ = com.google.protobuf.MapField.newMapField(
+            MetadataDefaultEntryHolder.defaultEntry);
+      }
+      if (!metadata_.isMutable()) {
+        metadata_ = metadata_.copy();
+      }
+      return metadata_;
+    }
+
+    public int getMetadataCount() {
+      return internalGetMetadata().getMap().size();
+    }
+    /**
+     *
+     * The Compute Engine metadata entries to add to all instances (see
+     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * 
+     *
+     * map&lt;string, string&gt; metadata = 5;
+     */
+
+    public boolean containsMetadata(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      return internalGetMetadata().getMap().containsKey(key);
+    }
+    /**
+     * Use {@link #getMetadataMap()} instead.
+     */
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String> getMetadata() {
+      return getMetadataMap();
+    }
+    /**
+     *
+     * The Compute Engine metadata entries to add to all instances (see
+     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * 
+     *
+     * map&lt;string, string&gt; metadata = 5;
+     */
+
+    public java.util.Map<java.lang.String, java.lang.String> getMetadataMap() {
+      return internalGetMetadata().getMap();
+    }
+    /**
+     *
+     * The Compute Engine metadata entries to add to all instances (see
+     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * 
+     *
+     * map&lt;string, string&gt; metadata = 5;
+     */
+
+    public java.lang.String getMetadataOrDefault(
+        java.lang.String key,
+        java.lang.String defaultValue) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      java.util.Map<java.lang.String, java.lang.String> map =
+          internalGetMetadata().getMap();
+      return map.containsKey(key) ? map.get(key) : defaultValue;
+    }
+    /**
+     *
+     * The Compute Engine metadata entries to add to all instances (see
+     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * 
+     *
+     * map&lt;string, string&gt; metadata = 5;
+     */
+
+    public java.lang.String getMetadataOrThrow(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      java.util.Map<java.lang.String, java.lang.String> map =
+          internalGetMetadata().getMap();
+      if (!map.containsKey(key)) {
+        throw new java.lang.IllegalArgumentException();
+      }
+      return map.get(key);
+    }
+
+    public Builder clearMetadata() {
+      internalGetMutableMetadata().getMutableMap()
+          .clear();
+      return this;
+    }
+    /**
+     *
+     * The Compute Engine metadata entries to add to all instances (see
+     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * 
+     *
+     * map&lt;string, string&gt; metadata = 5;
+     */
+
+    public Builder removeMetadata(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      internalGetMutableMetadata().getMutableMap()
+          .remove(key);
+      return this;
+    }
+    /**
+     * Use alternate mutation accessors instead.
+     */
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String>
+    getMutableMetadata() {
+      return internalGetMutableMetadata().getMutableMap();
+    }
+    /**
+     *
+     * The Compute Engine metadata entries to add to all instances (see
+     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * 
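+     * Illustrative sketch, not part of the generated file (the key and
+     * value are placeholders):
+     *
+     *   GceClusterConfig config = GceClusterConfig.newBuilder()
+     *       .putMetadata("environment", "staging")
+     *       .build();
+     *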
+     *
+     * map&lt;string, string&gt; metadata = 5;
+     */
+    public Builder putMetadata(
+        java.lang.String key,
+        java.lang.String value) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      if (value == null) { throw new java.lang.NullPointerException(); }
+      internalGetMutableMetadata().getMutableMap()
+          .put(key, value);
+      return this;
+    }
+    /**
+     *
+     * The Compute Engine metadata entries to add to all instances (see
+     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * 
+     *
+     * map&lt;string, string&gt; metadata = 5;
+     */
+
+    public Builder putAllMetadata(
+        java.util.Map<java.lang.String, java.lang.String> values) {
+      internalGetMutableMetadata().getMutableMap()
+          .putAll(values);
+      return this;
+    }
+    @java.lang.Override
+    public final Builder setUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFieldsProto3(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.GceClusterConfig)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GceClusterConfig)
+  private static final com.google.cloud.dataproc.v1beta2.GceClusterConfig DEFAULT_INSTANCE;
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.GceClusterConfig();
+  }
+
+  public static com.google.cloud.dataproc.v1beta2.GceClusterConfig getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<GceClusterConfig>
+      PARSER = new com.google.protobuf.AbstractParser<GceClusterConfig>() {
+    @java.lang.Override
+    public GceClusterConfig parsePartialFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return new GceClusterConfig(input, extensionRegistry);
+    }
+  };
+
+  public static com.google.protobuf.Parser<GceClusterConfig> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<GceClusterConfig> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.dataproc.v1beta2.GceClusterConfig getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+
+}
+
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java
new file mode 100644
index 000000000000..ede8b89447bd
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java
@@ -0,0 +1,324 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/clusters.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+public interface GceClusterConfigOrBuilder extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.GceClusterConfig)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   *
+   * Optional. The zone where the Compute Engine cluster will be located.
+   * On a create request, it is required in the "global" region. If omitted
+   * in a non-global Cloud Dataproc region, the service will pick a zone in the
+   * corresponding Compute Engine region. On a get request, zone will always be
+   * present.
+   * A full URL, partial URI, or short name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+   * * `projects/[project_id]/zones/[zone]`
+   * * `us-central1-f`
+   * 
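+   * Illustrative read-side sketch, not part of the generated interface
+   * (any GceClusterConfigOrBuilder works here, e.g. a built
+   * GceClusterConfig):
+   *
+   *   String zone = config.getZoneUri();
+   *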
+   *
+   * string zone_uri = 1;
+   */
+  java.lang.String getZoneUri();
+  /**
+   *
+   * Optional. The zone where the Compute Engine cluster will be located.
+   * On a create request, it is required in the "global" region. If omitted
+   * in a non-global Cloud Dataproc region, the service will pick a zone in the
+   * corresponding Compute Engine region. On a get request, zone will always be
+   * present.
+   * A full URL, partial URI, or short name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
+   * * `projects/[project_id]/zones/[zone]`
+   * * `us-central1-f`
+   * 
+   *
+   * string zone_uri = 1;
+   */
+  com.google.protobuf.ByteString
+      getZoneUriBytes();
+
+  /**
+   *
+   * Optional. The Compute Engine network to be used for machine
+   * communications. Cannot be specified with subnetwork_uri. If neither
+   * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+   * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+   * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+   * A full URL, partial URI, or short name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+   * * `projects/[project_id]/regions/global/default`
+   * * `default`
+   * 
+   *
+   * string network_uri = 2;
+   */
+  java.lang.String getNetworkUri();
+  /**
+   *
+   * Optional. The Compute Engine network to be used for machine
+   * communications. Cannot be specified with subnetwork_uri. If neither
+   * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
+   * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
+   * [Using Subnetworks](/compute/docs/subnetworks) for more information).
+   * A full URL, partial URI, or short name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
+   * * `projects/[project_id]/regions/global/default`
+   * * `default`
+   * 
+   *
+   * string network_uri = 2;
+   */
+  com.google.protobuf.ByteString
+      getNetworkUriBytes();
+
+  /**
+   *
+   * Optional. The Compute Engine subnetwork to be used for machine
+   * communications. Cannot be specified with network_uri.
+   * A full URL, partial URI, or short name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+   * * `projects/[project_id]/regions/us-east1/sub0`
+   * * `sub0`
+   * 
+   *
+   * string subnetwork_uri = 6;
+   */
+  java.lang.String getSubnetworkUri();
+  /**
+   *
+   * Optional. The Compute Engine subnetwork to be used for machine
+   * communications. Cannot be specified with network_uri.
+   * A full URL, partial URI, or short name are valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
+   * * `projects/[project_id]/regions/us-east1/sub0`
+   * * `sub0`
+   * 
+   *
+   * string subnetwork_uri = 6;
+   */
+  com.google.protobuf.ByteString
+      getSubnetworkUriBytes();
+
+  /**
+   *
+   * Optional. If true, all instances in the cluster will only have internal IP
+   * addresses. By default, clusters are not restricted to internal IP addresses,
+   * and will have ephemeral external IP addresses assigned to each instance.
+   * This `internal_ip_only` restriction can only be enabled for subnetwork
+   * enabled networks, and all off-cluster dependencies must be configured to be
+   * accessible without external IP addresses.
+   * 
+   *
+   * bool internal_ip_only = 7;
+   */
+  boolean getInternalIpOnly();
+
+  /**
+   *
+   * Optional. The service account of the instances. Defaults to the default
+   * Compute Engine service account. Custom service accounts need
+   * permissions equivalent to the following IAM roles:
+   * * roles/logging.logWriter
+   * * roles/storage.objectAdmin
+   * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+   * for more information).
+   * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+   * 
+   *
+   * string service_account = 8;
+   */
+  java.lang.String getServiceAccount();
+  /**
+   *
+   * Optional. The service account of the instances. Defaults to the default
+   * Compute Engine service account. Custom service accounts need
+   * permissions equivalent to the following IAM roles:
+   * * roles/logging.logWriter
+   * * roles/storage.objectAdmin
+   * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+   * for more information).
+   * Example: `[account_id]@[project_id].iam.gserviceaccount.com`
+   * 
+   *
+   * string service_account = 8;
+   */
+  com.google.protobuf.ByteString
+      getServiceAccountBytes();
+
+  /**
+   *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
+   *
+   * repeated string service_account_scopes = 3;
+   */
+  java.util.List<java.lang.String>
+      getServiceAccountScopesList();
+  /**
+   *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
+   *
+   * repeated string service_account_scopes = 3;
+   */
+  int getServiceAccountScopesCount();
+  /**
+   *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
+   *
+   * repeated string service_account_scopes = 3;
+   */
+  java.lang.String getServiceAccountScopes(int index);
+  /**
+   *
+   * Optional. The URIs of service account scopes to be included in
+   * Compute Engine instances. The following base set of scopes is always
+   * included:
+   * * https://www.googleapis.com/auth/cloud.useraccounts.readonly
+   * * https://www.googleapis.com/auth/devstorage.read_write
+   * * https://www.googleapis.com/auth/logging.write
+   * If no scopes are specified, the following defaults are also provided:
+   * * https://www.googleapis.com/auth/bigquery
+   * * https://www.googleapis.com/auth/bigtable.admin.table
+   * * https://www.googleapis.com/auth/bigtable.data
+   * * https://www.googleapis.com/auth/devstorage.full_control
+   * 
+   *
+   * repeated string service_account_scopes = 3;
+   */
+  com.google.protobuf.ByteString
+      getServiceAccountScopesBytes(int index);
+
+  /**
+   *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
+   *
+   * repeated string tags = 4;
+   */
+  java.util.List<java.lang.String>
+      getTagsList();
+  /**
+   *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
+   *
+   * repeated string tags = 4;
+   */
+  int getTagsCount();
+  /**
+   *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
+   *
+   * repeated string tags = 4;
+   */
+  java.lang.String getTags(int index);
+  /**
+   *
+   * The Compute Engine tags to add to all instances (see
+   * [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
+   * 
+   *
+   * repeated string tags = 4;
+   */
+  com.google.protobuf.ByteString
+      getTagsBytes(int index);
+
+  /**
+   *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+   *
+   * map&lt;string, string&gt; metadata = 5;
+   */
+  int getMetadataCount();
+  /**
+   *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+   *
+   * map&lt;string, string&gt; metadata = 5;
+   */
+  boolean containsMetadata(
+      java.lang.String key);
+  /**
+   * Use {@link #getMetadataMap()} instead.
+   */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.String>
+  getMetadata();
+  /**
+   *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+   *
+   * map&lt;string, string&gt; metadata = 5;
+   */
+  java.util.Map<java.lang.String, java.lang.String>
+  getMetadataMap();
+  /**
+   *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+   *
+   * map&lt;string, string&gt; metadata = 5;
+   */
+
+  java.lang.String getMetadataOrDefault(
+      java.lang.String key,
+      java.lang.String defaultValue);
+  /**
+   *
+   * The Compute Engine metadata entries to add to all instances (see
+   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * 
+   *
+   * map&lt;string, string&gt; metadata = 5;
+   */
+
+  java.lang.String getMetadataOrThrow(
+      java.lang.String key);
+}
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequest.java
new file mode 100644
index 000000000000..bdfbc4c7d84c
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequest.java
@@ -0,0 +1,894 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/clusters.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+/**
+ *
+ * Request to get the resource representation for a cluster in a project.
+ * 
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1beta2.GetClusterRequest}
+ */
+public final class GetClusterRequest extends
+    com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.GetClusterRequest)
+    GetClusterRequestOrBuilder {
+private static final long serialVersionUID = 0L;
+  // Use GetClusterRequest.newBuilder() to construct.
+  private GetClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private GetClusterRequest() {
+    projectId_ = "";
+    region_ = "";
+    clusterName_ = "";
+  }
+
+  @java.lang.Override
+  public final com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return this.unknownFields;
+  }
+  private GetClusterRequest(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    if (extensionRegistry == null) {
+      throw new java.lang.NullPointerException();
+    }
+    int mutable_bitField0_ = 0;
+    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+        com.google.protobuf.UnknownFieldSet.newBuilder();
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            done = true;
+            break;
+          case 10: {
+            java.lang.String s = input.readStringRequireUtf8();
+
+            projectId_ = s;
+            break;
+          }
+          case 18: {
+            java.lang.String s = input.readStringRequireUtf8();
+
+            clusterName_ = s;
+            break;
+          }
+          case 26: {
+            java.lang.String s = input.readStringRequireUtf8();
+
+            region_ = s;
+            break;
+          }
+          default: {
+            if (!parseUnknownFieldProto3(
+                input, unknownFields, extensionRegistry, tag)) {
+              done = true;
+            }
+            break;
+          }
+        }
+      }
+    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      this.unknownFields = unknownFields.build();
+      makeExtensionsImmutable();
+    }
+  }
+  public static final com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_descriptor;
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.dataproc.v1beta2.GetClusterRequest.class, com.google.cloud.dataproc.v1beta2.GetClusterRequest.Builder.class);
+  }
+
+  public static final int PROJECT_ID_FIELD_NUMBER = 1;
+  private volatile java.lang.Object projectId_;
+  /**
+   *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
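+   * Illustrative sketch covering project_id, region and cluster_name, not
+   * part of the generated file (all values are placeholders):
+   *
+   *   GetClusterRequest request = GetClusterRequest.newBuilder()
+   *       .setProjectId("[project_id]")
+   *       .setRegion("global")
+   *       .setClusterName("my-cluster")
+   *       .build();
+   *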
+   *
+   * string project_id = 1;
+   */
+  public java.lang.String getProjectId() {
+    java.lang.Object ref = projectId_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = 
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      projectId_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+   *
+   * string project_id = 1;
+   */
+  public com.google.protobuf.ByteString
+      getProjectIdBytes() {
+    java.lang.Object ref = projectId_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b = 
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      projectId_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int REGION_FIELD_NUMBER = 3;
+  private volatile java.lang.Object region_;
+  /**
+   *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+   *
+   * string region = 3;
+   */
+  public java.lang.String getRegion() {
+    java.lang.Object ref = region_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = 
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      region_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+   *
+   * string region = 3;
+   */
+  public com.google.protobuf.ByteString
+      getRegionBytes() {
+    java.lang.Object ref = region_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b = 
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      region_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int CLUSTER_NAME_FIELD_NUMBER = 2;
+  private volatile java.lang.Object clusterName_;
+  /**
+   *
+   * Required. The cluster name.
+   * 
+   *
+   * string cluster_name = 2;
+   */
+  public java.lang.String getClusterName() {
+    java.lang.Object ref = clusterName_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs = 
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      clusterName_ = s;
+      return s;
+    }
+  }
+  /**
+   *
+   * Required. The cluster name.
+   * 
+   *
+   * string cluster_name = 2;
+   */
+  public com.google.protobuf.ByteString
+      getClusterNameBytes() {
+    java.lang.Object ref = clusterName_;
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b = 
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      clusterName_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  private byte memoizedIsInitialized = -1;
+  @java.lang.Override
+  public final boolean isInitialized() {
+    byte isInitialized = memoizedIsInitialized;
+    if (isInitialized == 1) return true;
+    if (isInitialized == 0) return false;
+
+    memoizedIsInitialized = 1;
+    return true;
+  }
+
+  @java.lang.Override
+  public void writeTo(com.google.protobuf.CodedOutputStream output)
+                      throws java.io.IOException {
+    if (!getProjectIdBytes().isEmpty()) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_);
+    }
+    if (!getClusterNameBytes().isEmpty()) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterName_);
+    }
+    if (!getRegionBytes().isEmpty()) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_);
+    }
+    unknownFields.writeTo(output);
+  }
+
+  @java.lang.Override
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    if (!getProjectIdBytes().isEmpty()) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_);
+    }
+    if (!getClusterNameBytes().isEmpty()) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterName_);
+    }
+    if (!getRegionBytes().isEmpty()) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_);
+    }
+    size += unknownFields.getSerializedSize();
+    memoizedSize = size;
+    return size;
+  }
+
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+    if (obj == this) {
+     return true;
+    }
+    if (!(obj instanceof com.google.cloud.dataproc.v1beta2.GetClusterRequest)) {
+      return super.equals(obj);
+    }
+    com.google.cloud.dataproc.v1beta2.GetClusterRequest other = (com.google.cloud.dataproc.v1beta2.GetClusterRequest) obj;
+
+    boolean result = true;
+    result = result && getProjectId()
+        .equals(other.getProjectId());
+    result = result && getRegion()
+        .equals(other.getRegion());
+    result = result && getClusterName()
+        .equals(other.getClusterName());
+    result = result && unknownFields.equals(other.unknownFields);
+    return result;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+    if (memoizedHashCode != 0) {
+      return memoizedHashCode;
+    }
+    int hash = 41;
+    hash = (19 * hash) + getDescriptor().hashCode();
+    hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER;
+    hash = (53 * hash) + getProjectId().hashCode();
+    hash = (37 * hash) + REGION_FIELD_NUMBER;
+    hash = (53 * hash) + getRegion().hashCode();
+    hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER;
+    hash = (53 * hash) + getClusterName().hashCode();
+    hash = (29 * hash) + unknownFields.hashCode();
+    memoizedHashCode = hash;
+    return hash;
+  }
+
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      java.nio.ByteBuffer data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      java.nio.ByteBuffer data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      com.google.protobuf.ByteString data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      com.google.protobuf.ByteString data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(byte[] data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      byte[] data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      java.io.InputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseDelimitedFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseDelimitedFrom(
+      java.io.InputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      com.google.protobuf.CodedInputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.GetClusterRequest parseFrom(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+
+  @java.lang.Override
+  public Builder newBuilderForType() { return newBuilder(); }
+  public static Builder newBuilder() {
+    return DEFAULT_INSTANCE.toBuilder();
+  }
+  public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.GetClusterRequest prototype) {
+    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+  }
+  @java.lang.Override
+  public Builder toBuilder() {
+    return this == DEFAULT_INSTANCE
+        ? new Builder() : new Builder().mergeFrom(this);
+  }
+
+  @java.lang.Override
+  protected Builder newBuilderForType(
+      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+    Builder builder = new Builder(parent);
+    return builder;
+  }
+  /**
+   *
+   * Request to get the resource representation for a cluster in a project.
+   * 
+   *
+   * Protobuf type {@code google.cloud.dataproc.v1beta2.GetClusterRequest}
+   */
+  public static final class Builder extends
+      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+      // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.GetClusterRequest)
+      com.google.cloud.dataproc.v1beta2.GetClusterRequestOrBuilder {
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_descriptor;
+    }
+
+    @java.lang.Override
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              com.google.cloud.dataproc.v1beta2.GetClusterRequest.class, com.google.cloud.dataproc.v1beta2.GetClusterRequest.Builder.class);
+    }
+
+    // Construct using com.google.cloud.dataproc.v1beta2.GetClusterRequest.newBuilder()
+    private Builder() {
+      maybeForceBuilderInitialization();
+    }
+
+    private Builder(
+        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      super(parent);
+      maybeForceBuilderInitialization();
+    }
+    private void maybeForceBuilderInitialization() {
+      if (com.google.protobuf.GeneratedMessageV3
+              .alwaysUseFieldBuilders) {
+      }
+    }
+    @java.lang.Override
+    public Builder clear() {
+      super.clear();
+      projectId_ = "";
+
+      region_ = "";
+
+      clusterName_ = "";
+
+      return this;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.Descriptor
+        getDescriptorForType() {
+      return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_GetClusterRequest_descriptor;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.dataproc.v1beta2.GetClusterRequest getDefaultInstanceForType() {
+      return com.google.cloud.dataproc.v1beta2.GetClusterRequest.getDefaultInstance();
+    }
+
+    @java.lang.Override
+    public com.google.cloud.dataproc.v1beta2.GetClusterRequest build() {
+      com.google.cloud.dataproc.v1beta2.GetClusterRequest result = buildPartial();
+      if (!result.isInitialized()) {
+        throw newUninitializedMessageException(result);
+      }
+      return result;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.dataproc.v1beta2.GetClusterRequest buildPartial() {
+      com.google.cloud.dataproc.v1beta2.GetClusterRequest result = new com.google.cloud.dataproc.v1beta2.GetClusterRequest(this);
+      result.projectId_ = projectId_;
+      result.region_ = region_;
+      result.clusterName_ = clusterName_;
+      onBuilt();
+      return result;
+    }
+
+    @java.lang.Override
+    public Builder clone() {
+      return (Builder) super.clone();
+    }
+    @java.lang.Override
+    public Builder setField(
+        com.google.protobuf.Descriptors.FieldDescriptor field,
+        java.lang.Object value) {
+      return (Builder) super.setField(field, value);
+    }
+    @java.lang.Override
+    public Builder clearField(
+        com.google.protobuf.Descriptors.FieldDescriptor field) {
+      return (Builder) super.clearField(field);
+    }
+    @java.lang.Override
+    public Builder clearOneof(
+        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+      return (Builder) super.clearOneof(oneof);
+    }
+    @java.lang.Override
+    public Builder setRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field,
+        int index, java.lang.Object value) {
+      return (Builder) super.setRepeatedField(field, index, value);
+    }
+    @java.lang.Override
+    public Builder addRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field,
+        java.lang.Object value) {
+      return (Builder) super.addRepeatedField(field, value);
+    }
+    @java.lang.Override
+    public Builder mergeFrom(com.google.protobuf.Message other) {
+      if (other instanceof com.google.cloud.dataproc.v1beta2.GetClusterRequest) {
+        return mergeFrom((com.google.cloud.dataproc.v1beta2.GetClusterRequest)other);
+      } else {
+        super.mergeFrom(other);
+        return this;
+      }
+    }
+
+    public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.GetClusterRequest other) {
+      if (other == com.google.cloud.dataproc.v1beta2.GetClusterRequest.getDefaultInstance()) return this;
+      if (!other.getProjectId().isEmpty()) {
+        projectId_ = other.projectId_;
+        onChanged();
+      }
+      if (!other.getRegion().isEmpty()) {
+        region_ = other.region_;
+        onChanged();
+      }
+      if (!other.getClusterName().isEmpty()) {
+        clusterName_ = other.clusterName_;
+        onChanged();
+      }
+      this.mergeUnknownFields(other.unknownFields);
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final boolean isInitialized() {
+      return true;
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      com.google.cloud.dataproc.v1beta2.GetClusterRequest parsedMessage = null;
+      try {
+        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        parsedMessage = (com.google.cloud.dataproc.v1beta2.GetClusterRequest) e.getUnfinishedMessage();
+        throw e.unwrapIOException();
+      } finally {
+        if (parsedMessage != null) {
+          mergeFrom(parsedMessage);
+        }
+      }
+      return this;
+    }
+
+    private java.lang.Object projectId_ = "";
+    /**
+     *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+     *
+     * string project_id = 1;
+     */
+    public java.lang.String getProjectId() {
+      java.lang.Object ref = projectId_;
+      if (!(ref instanceof java.lang.String)) {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        projectId_ = s;
+        return s;
+      } else {
+        return (java.lang.String) ref;
+      }
+    }
+    /**
+     *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+     *
+     * string project_id = 1;
+     */
+    public com.google.protobuf.ByteString
+        getProjectIdBytes() {
+      java.lang.Object ref = projectId_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        projectId_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    /**
+     *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.GetClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GetClusterRequest) + private static final com.google.cloud.dataproc.v1beta2.GetClusterRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.GetClusterRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.GetClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetClusterRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequestOrBuilder.java new file mode 100644 index 000000000000..bd2c6c35c1ed --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequestOrBuilder.java @@ -0,0 +1,65 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface GetClusterRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.GetClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + java.lang.String getClusterName(); + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequest.java new file mode 100644 index 000000000000..777635167210 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequest.java @@ -0,0 +1,894 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to get the resource representation for a job in a project.
+ * 
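+ *
+ * A brief usage sketch (editorial addition, not compiler output); the
+ * project, region, and job IDs are placeholders:
+ *
+ *   GetJobRequest request = GetJobRequest.newBuilder()
+ *       .setProjectId("my-project")
+ *       .setRegion("us-central1")
+ *       .setJobId("my-job-id")
+ *       .build();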
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.GetJobRequest} + */ +public final class GetJobRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.GetJobRequest) + GetJobRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetJobRequest.newBuilder() to construct. + private GetJobRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetJobRequest() { + projectId_ = ""; + region_ = ""; + jobId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetJobRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + jobId_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.GetJobRequest.class, com.google.cloud.dataproc.v1beta2.GetJobRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 3; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int JOB_ID_FIELD_NUMBER = 2; + private volatile java.lang.Object jobId_; + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } + } + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, jobId_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, jobId_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.GetJobRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.GetJobRequest other = (com.google.cloud.dataproc.v1beta2.GetJobRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getJobId() + .equals(other.getJobId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GetJobRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.GetJobRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to get the resource representation for a job in a project.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.GetJobRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.GetJobRequest) + com.google.cloud.dataproc.v1beta2.GetJobRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.GetJobRequest.class, com.google.cloud.dataproc.v1beta2.GetJobRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.GetJobRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + jobId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetJobRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.GetJobRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetJobRequest build() { + com.google.cloud.dataproc.v1beta2.GetJobRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetJobRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.GetJobRequest result = new com.google.cloud.dataproc.v1beta2.GetJobRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.jobId_ = jobId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) 
super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.GetJobRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.GetJobRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.GetJobRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.GetJobRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getJobId().isEmpty()) { + jobId_ = other.jobId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.GetJobRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.GetJobRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object jobId_ = ""; + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + jobId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder clearJobId() { + + jobId_ = getDefaultInstance().getJobId(); + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + jobId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.GetJobRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GetJobRequest) + private static final com.google.cloud.dataproc.v1beta2.GetJobRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.GetJobRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.GetJobRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetJobRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetJobRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetJobRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequestOrBuilder.java new file mode 100644 index 000000000000..363358a93bf8 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequestOrBuilder.java @@ -0,0 +1,65 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface GetJobRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.GetJobRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + java.lang.String getJobId(); + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 2; + */ + com.google.protobuf.ByteString + getJobIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequest.java new file mode 100644 index 000000000000..88e7fb80e3a7 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequest.java @@ -0,0 +1,673 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to fetch a workflow template.
+ * 
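+ *
+ * A brief usage sketch (editorial addition, not compiler output); the template
+ * name is a placeholder, and setVersion may be omitted to fetch the current
+ * version:
+ *
+ *   GetWorkflowTemplateRequest request = GetWorkflowTemplateRequest.newBuilder()
+ *       .setName("projects/my-project/regions/us-central1/workflowTemplates/my-template")
+ *       .setVersion(2)
+ *       .build();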
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest} + */ +public final class GetWorkflowTemplateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) + GetWorkflowTemplateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetWorkflowTemplateRequest.newBuilder() to construct. + private GetWorkflowTemplateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetWorkflowTemplateRequest() { + name_ = ""; + version_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetWorkflowTemplateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: { + + version_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
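+   * For example (with placeholder IDs):
+   * `projects/my-project/regions/us-central1/workflowTemplates/my-template`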
+   * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSION_FIELD_NUMBER = 2; + private int version_; + /** + *
+   * Optional. The version of the workflow template to retrieve. Only previously
+   * instantiated versions can be retrieved.
+   * If unspecified, retrieves the current version.
+   * 
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (version_ != 0) { + output.writeInt32(2, version_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (version_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, version_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest other = (com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) obj; + + boolean result = true; + result = result && getName() + .equals(other.getName()); + result = result && (getVersion() + == other.getVersion()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to fetch a workflow template.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + version_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest build() { + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest result = new com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest(this); + result.name_ = name_; + result.version_ = version_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) 
super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getVersion() != 0) { + setVersion(other.getVersion()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int version_ ; + /** + *
+     * Optional. The version of the workflow template to retrieve. Only previously
+     * instantiated versions can be retrieved.
+     * If unspecified, retrieves the current version.
+     * 
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + /** + *
+     * Optional. The version of the workflow template to retrieve. Only previously
+     * instantiated versions can be retrieved.
+     * If unspecified, retrieves the current version.
+     * 
+ * + * int32 version = 2; + */ + public Builder setVersion(int value) { + + version_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The version of the workflow template to retrieve. Only previously
+     * instantiated versions can be retrieved.
+     * If unspecified, retrieves the current version.
+     * 
+ * + * int32 version = 2; + */ + public Builder clearVersion() { + + version_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) + private static final com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetWorkflowTemplateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetWorkflowTemplateRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequestOrBuilder.java new file mode 100644 index 000000000000..47f89bee626a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequestOrBuilder.java @@ -0,0 +1,42 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface GetWorkflowTemplateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
+ * + * string name = 1; + */ + java.lang.String getName(); + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
+ * + * string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
+   * Optional. The version of the workflow template to retrieve. Only
+   * previously instantiated versions can be retrieved.
+   * If unspecified, retrieves the current version.
+   * 
+ * + * int32 version = 2; + */ + int getVersion(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java new file mode 100644 index 000000000000..81ee19e7d659 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java @@ -0,0 +1,2469 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A Cloud Dataproc job for running
+ * [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+ * jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+ * 
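+ *
+ * A minimal construction sketch (illustrative only; the bucket, jar, and
+ * argument values are hypothetical placeholders, and `putProperties` is the
+ * standard generated mutator for the `properties` map field):
+ * <pre>
+ * HadoopJob job = HadoopJob.newBuilder()
+ *     .setMainJarFileUri("gs://my-bucket/wordcount.jar")
+ *     .addArgs("gs://my-bucket/input/")
+ *     .addArgs("gs://my-bucket/output/")
+ *     .putProperties("mapreduce.job.reduces", "4")
+ *     .build();
+ * </pre>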
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.HadoopJob} + */ +public final class HadoopJob extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.HadoopJob) + HadoopJobOrBuilder { +private static final long serialVersionUID = 0L; + // Use HadoopJob.newBuilder() to construct. + private HadoopJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private HadoopJob() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private HadoopJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + driverCase_ = 1; + driver_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + driverCase_ = 2; + driver_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + args_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + args_.add(s); + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000008; + } + jarFileUris_.add(s); + break; + } + case 42: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000010; + } + fileUris_.add(s); + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000020; + } + archiveUris_.add(s); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000040; + } + com.google.protobuf.MapEntry + properties__ = input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + properties_.getMutableMap().put( + properties__.getKey(), properties__.getValue()); + break; + } + case 66: { + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder subBuilder = null; + if (loggingConfig_ != null) { + subBuilder = loggingConfig_.toBuilder(); + } + loggingConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.LoggingConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loggingConfig_); + loggingConfig_ = 
subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + args_ = args_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = fileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HadoopJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 7: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HadoopJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.HadoopJob.class, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder.class); + } + + private int bitField0_; + private int driverCase_ = 0; + private java.lang.Object driver_; + public enum DriverCase + implements com.google.protobuf.Internal.EnumLite { + MAIN_JAR_FILE_URI(1), + MAIN_CLASS(2), + DRIVER_NOT_SET(0); + private final int value; + private DriverCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DriverCase valueOf(int value) { + return forNumber(value); + } + + public static DriverCase forNumber(int value) { + switch (value) { + case 1: return MAIN_JAR_FILE_URI; + case 2: return MAIN_CLASS; + case 0: return DRIVER_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public DriverCase + getDriverCase() { + return DriverCase.forNumber( + driverCase_); + } + + public static final int MAIN_JAR_FILE_URI_FIELD_NUMBER = 1; + /** + *
+   * The HCFS URI of the jar file containing the main class.
+   * Examples:
+   *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+   *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+   *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+   * 
+ * + * string main_jar_file_uri = 1; + */ + public java.lang.String getMainJarFileUri() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 1) { + driver_ = s; + } + return s; + } + } + /** + *
+   * The HCFS URI of the jar file containing the main class.
+   * Examples:
+   *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+   *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+   *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+   * 
+ * + * string main_jar_file_uri = 1; + */ + public com.google.protobuf.ByteString + getMainJarFileUriBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 1) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MAIN_CLASS_FIELD_NUMBER = 2; + /** + *
+   * The name of the driver's main class. The jar file containing the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * 
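+   *
+   * A sketch of the class-name form of the driver, where the jar providing
+   * the class is supplied via `jar_file_uris` (values are hypothetical
+   * placeholders):
+   * <pre>
+   * HadoopJob job = HadoopJob.newBuilder()
+   *     .setMainClass("com.example.WordCount")
+   *     .addJarFileUris("gs://my-bucket/wordcount.jar")
+   *     .build();
+   * </pre>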
+ * + * string main_class = 2; + */ + public java.lang.String getMainClass() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 2) { + driver_ = s; + } + return s; + } + } + /** + *
+   * The name of the driver's main class. The jar file containing the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2; + */ + public com.google.protobuf.ByteString + getMainClassBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 2) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARGS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList args_; + /** + *
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * 
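+   *
+   * For example, given a hypothetical `HadoopJob.Builder builder`, prefer
+   * the `properties` map over a `-D` argument (property name and value are
+   * illustrative):
+   * <pre>
+   * // Avoid: builder.addArgs("-Dmapreduce.job.reduces=4");
+   * builder.putProperties("mapreduce.job.reduces", "4");
+   * </pre>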
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ProtocolStringList + getArgsList() { + return args_; + } + /** + *
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * 
+ * + * repeated string args = 3; + */ + public int getArgsCount() { + return args_.size(); + } + /** + *
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * 
+ * + * repeated string args = 3; + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + *
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * 
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ByteString + getArgsBytes(int index) { + return args_.getByteString(index); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + *
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_; + } + /** + *
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + public static final int FILE_URIS_FIELD_NUMBER = 5; + private com.google.protobuf.LazyStringList fileUris_; + /** + *
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ProtocolStringList + getFileUrisList() { + return fileUris_; + } + /** + *
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + *
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + *
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ByteString + getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + + public static final int ARCHIVE_URIS_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList archiveUris_; + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getArchiveUrisList() { + return archiveUris_; + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ByteString + getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + + public static final int PROPERTIES_FIELD_NUMBER = 7; + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HadoopJob_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * 
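+   *
+   * A read-side sketch using the generated default-aware accessor (`job`
+   * and the property key are hypothetical):
+   * <pre>
+   * String reduces = job.getPropertiesOrDefault("mapreduce.job.reduces", "1");
+   * </pre>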
+ * + * map<string, string> properties = 7; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int LOGGING_CONFIG_FIELD_NUMBER = 8; + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_; + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
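+   *
+   * Since this is a message field, check presence before reading; the
+   * getter otherwise returns the default instance (`job` is a hypothetical
+   * HadoopJob):
+   * <pre>
+   * if (job.hasLoggingConfig()) {
+   *   LoggingConfig config = job.getLoggingConfig();
+   * }
+   * </pre>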
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public boolean hasLoggingConfig() { + return loggingConfig_ != null; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + return getLoggingConfig(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (driverCase_ == 1) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, driver_); + } + if (driverCase_ == 2) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, driver_); + } + for (int i = 0; i < args_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, args_.getRaw(i)); + } + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, jarFileUris_.getRaw(i)); + } + for (int i = 0; i < fileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, fileUris_.getRaw(i)); + } + for (int i = 0; i < archiveUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, archiveUris_.getRaw(i)); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetProperties(), + PropertiesDefaultEntryHolder.defaultEntry, + 7); + if (loggingConfig_ != null) { + output.writeMessage(8, getLoggingConfig()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (driverCase_ == 1) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, driver_); + } + if (driverCase_ == 2) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, driver_); + } + { + int dataSize = 0; + for (int i = 0; i < args_.size(); i++) { + dataSize += computeStringSizeNoTag(args_.getRaw(i)); + } + size += dataSize; + size += 1 * getArgsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getJarFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < fileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(fileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < archiveUris_.size(); i++) { + dataSize += computeStringSizeNoTag(archiveUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getArchiveUrisList().size(); + } + for (java.util.Map.Entry entry + : internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry + properties__ = PropertiesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, properties__); + } + if (loggingConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, getLoggingConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.HadoopJob)) { 
+ return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.HadoopJob other = (com.google.cloud.dataproc.v1beta2.HadoopJob) obj; + + boolean result = true; + result = result && getArgsList() + .equals(other.getArgsList()); + result = result && getJarFileUrisList() + .equals(other.getJarFileUrisList()); + result = result && getFileUrisList() + .equals(other.getFileUrisList()); + result = result && getArchiveUrisList() + .equals(other.getArchiveUrisList()); + result = result && internalGetProperties().equals( + other.internalGetProperties()); + result = result && (hasLoggingConfig() == other.hasLoggingConfig()); + if (hasLoggingConfig()) { + result = result && getLoggingConfig() + .equals(other.getLoggingConfig()); + } + result = result && getDriverCase().equals( + other.getDriverCase()); + if (!result) return false; + switch (driverCase_) { + case 1: + result = result && getMainJarFileUri() + .equals(other.getMainJarFileUri()); + break; + case 2: + result = result && getMainClass() + .equals(other.getMainClass()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getArgsCount() > 0) { + hash = (37 * hash) + ARGS_FIELD_NUMBER; + hash = (53 * hash) + getArgsList().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + if (getFileUrisCount() > 0) { + hash = (37 * hash) + FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getFileUrisList().hashCode(); + } + if (getArchiveUrisCount() > 0) { + hash = (37 * hash) + ARCHIVE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getArchiveUrisList().hashCode(); + } + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + if (hasLoggingConfig()) { + hash = (37 * hash) + LOGGING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLoggingConfig().hashCode(); + } + switch (driverCase_) { + case 1: + hash = (37 * hash) + MAIN_JAR_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMainJarFileUri().hashCode(); + break; + case 2: + hash = (37 * hash) + MAIN_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getMainClass().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); 
+ } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.HadoopJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.HadoopJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc job for running
+   * [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+   * jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.HadoopJob} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.HadoopJob) + com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HadoopJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 7: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 7: + return internalGetMutableProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HadoopJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.HadoopJob.class, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.HadoopJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + internalGetMutableProperties().clear(); + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + driverCase_ = 0; + driver_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HadoopJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.HadoopJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.HadoopJob build() { + com.google.cloud.dataproc.v1beta2.HadoopJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.HadoopJob buildPartial() { + com.google.cloud.dataproc.v1beta2.HadoopJob result = new com.google.cloud.dataproc.v1beta2.HadoopJob(this); + int from_bitField0_ = bitField0_; + 
int to_bitField0_ = 0; + if (driverCase_ == 1) { + result.driver_ = driver_; + } + if (driverCase_ == 2) { + result.driver_ = driver_; + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + args_ = args_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.args_ = args_; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.jarFileUris_ = jarFileUris_; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = fileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.fileUris_ = fileUris_; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.archiveUris_ = archiveUris_; + result.properties_ = internalGetProperties(); + result.properties_.makeImmutable(); + if (loggingConfigBuilder_ == null) { + result.loggingConfig_ = loggingConfig_; + } else { + result.loggingConfig_ = loggingConfigBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + result.driverCase_ = driverCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.HadoopJob) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.HadoopJob)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.HadoopJob other) { + if (other == com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance()) return this; + if (!other.args_.isEmpty()) { + if (args_.isEmpty()) { + args_ = other.args_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureArgsIsMutable(); + args_.addAll(other.args_); + } + onChanged(); + } + if (!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + if (!other.fileUris_.isEmpty()) { + if (fileUris_.isEmpty()) { + fileUris_ = other.fileUris_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureFileUrisIsMutable(); + fileUris_.addAll(other.fileUris_); + } + onChanged(); + } + if (!other.archiveUris_.isEmpty()) { + if (archiveUris_.isEmpty()) { + archiveUris_ = other.archiveUris_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + 
ensureArchiveUrisIsMutable(); + archiveUris_.addAll(other.archiveUris_); + } + onChanged(); + } + internalGetMutableProperties().mergeFrom( + other.internalGetProperties()); + if (other.hasLoggingConfig()) { + mergeLoggingConfig(other.getLoggingConfig()); + } + switch (other.getDriverCase()) { + case MAIN_JAR_FILE_URI: { + driverCase_ = 1; + driver_ = other.driver_; + onChanged(); + break; + } + case MAIN_CLASS: { + driverCase_ = 2; + driver_ = other.driver_; + onChanged(); + break; + } + case DRIVER_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.HadoopJob parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.HadoopJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int driverCase_ = 0; + private java.lang.Object driver_; + public DriverCase + getDriverCase() { + return DriverCase.forNumber( + driverCase_); + } + + public Builder clearDriver() { + driverCase_ = 0; + driver_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + *
+     * The HCFS URI of the jar file containing the main class.
+     * Examples:
+     *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+     *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+     *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public java.lang.String getMainJarFileUri() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 1) { + driver_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The HCFS URI of the jar file containing the main class.
+     * Examples:
+     *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+     *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+     *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public com.google.protobuf.ByteString + getMainJarFileUriBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 1) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The HCFS URI of the jar file containing the main class.
+     * Examples:
+     *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+     *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+     *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public Builder setMainJarFileUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + driverCase_ = 1; + driver_ = value; + onChanged(); + return this; + } + /** + *
+     * The HCFS URI of the jar file containing the main class.
+     * Examples:
+     *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+     *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+     *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public Builder clearMainJarFileUri() { + if (driverCase_ == 1) { + driverCase_ = 0; + driver_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The HCFS URI of the jar file containing the main class.
+     * Examples:
+     *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+     *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+     *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public Builder setMainJarFileUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + driverCase_ = 1; + driver_ = value; + onChanged(); + return this; + } + + /** + *
+     * The name of the driver's main class. The jar file containing the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public java.lang.String getMainClass() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 2) { + driver_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The name of the driver's main class. The jar file containing the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public com.google.protobuf.ByteString + getMainClassBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 2) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The name of the driver's main class. The jar file containing the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
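+     *
+     * Note that `main_class` and `main_jar_file_uri` share the `driver`
+     * oneof: setting one clears the other, so a job specifies its driver
+     * either by jar URI or by class name, never both.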
+ * + * string main_class = 2; + */ + public Builder setMainClass( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + driverCase_ = 2; + driver_ = value; + onChanged(); + return this; + } + /** + *
+     * The name of the driver's main class. The jar file containing the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public Builder clearMainClass() { + if (driverCase_ == 2) { + driverCase_ = 0; + driver_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The name of the driver's main class. The jar file containing the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public Builder setMainClassBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + driverCase_ = 2; + driver_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureArgsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + args_ = new com.google.protobuf.LazyStringArrayList(args_); + bitField0_ |= 0x00000004; + } + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ProtocolStringList + getArgsList() { + return args_.getUnmodifiableView(); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public int getArgsCount() { + return args_.size(); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ByteString + getArgsBytes(int index) { + return args_.getByteString(index); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder setArgs( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder addArgs( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder addAllArgs( + java.lang.Iterable values) { + ensureArgsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, args_); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder clearArgs() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+     * properties, since a collision may occur that causes an incorrect job
+     * submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder addArgsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000008; + } + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder setJarFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addJarFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addAllJarFileUris( + java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jarFileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + *
+     * Optional. Jar file URIs to add to the CLASSPATHs of the
+     * Hadoop driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addJarFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureFileUrisIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(fileUris_); + bitField0_ |= 0x00000010; + } + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ProtocolStringList + getFileUrisList() { + return fileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ByteString + getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder setFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addAllFileUris( + java.lang.Iterable values) { + ensureFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, fileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder clearFileUris() { + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+     * to the working directory of Hadoop drivers and distributed tasks. Useful
+     * for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureArchiveUrisIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(archiveUris_); + bitField0_ |= 0x00000020; + } + } + /** + *
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public com.google.protobuf.ProtocolStringList
+        getArchiveUrisList() {
+      return archiveUris_.getUnmodifiableView();
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public int getArchiveUrisCount() {
+      return archiveUris_.size();
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public java.lang.String getArchiveUris(int index) {
+      return archiveUris_.get(index);
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public com.google.protobuf.ByteString
+        getArchiveUrisBytes(int index) {
+      return archiveUris_.getByteString(index);
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public Builder setArchiveUris(
+        int index, java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      ensureArchiveUrisIsMutable();
+      archiveUris_.set(index, value);
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public Builder addArchiveUris(
+        java.lang.String value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      ensureArchiveUrisIsMutable();
+      archiveUris_.add(value);
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public Builder addAllArchiveUris(
+        java.lang.Iterable<java.lang.String> values) {
+      ensureArchiveUrisIsMutable();
+      com.google.protobuf.AbstractMessageLite.Builder.addAll(
+          values, archiveUris_);
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public Builder clearArchiveUris() {
+      archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      bitField0_ = (bitField0_ & ~0x00000020);
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Hadoop drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, or .zip.
+     * </pre>
+     *
+     * <code>repeated string archive_uris = 6;</code>
+     */
+    public Builder addArchiveUrisBytes(
+        com.google.protobuf.ByteString value) {
+      if (value == null) {
+        throw new NullPointerException();
+      }
+      checkByteStringIsUtf8(value);
+      ensureArchiveUrisIsMutable();
+      archiveUris_.add(value);
+      onChanged();
+      return this;
+    }
+
+    private com.google.protobuf.MapField<
+        java.lang.String, java.lang.String> properties_;
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+    internalGetProperties() {
+      if (properties_ == null) {
+        return com.google.protobuf.MapField.emptyMapField(
+            PropertiesDefaultEntryHolder.defaultEntry);
+      }
+      return properties_;
+    }
+    private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+    internalGetMutableProperties() {
+      onChanged();;
+      if (properties_ == null) {
+        properties_ = com.google.protobuf.MapField.newMapField(
+            PropertiesDefaultEntryHolder.defaultEntry);
+      }
+      if (!properties_.isMutable()) {
+        properties_ = properties_.copy();
+      }
+      return properties_;
+    }
+
+    public int getPropertiesCount() {
+      return internalGetProperties().getMap().size();
+    }
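[Editor's note: a sketch of the properties map accessors that follow; putProperties and getPropertiesOrDefault are both defined just below, and the Hadoop property name/value are hypothetical.]

    HadoopJob.Builder b = HadoopJob.newBuilder()
        .putProperties("mapreduce.job.reduces", "4");  // merged into the job's Hadoop configuration
    String reduces = b.getPropertiesOrDefault("mapreduce.job.reduces", "1");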
+    /**
+     * <pre>
+     * Optional. A mapping of property names to values, used to configure Hadoop.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+     * classes in user code.
+     * </pre>
+     *
+     * <code>map&lt;string, string&gt; properties = 7;</code>
+     */
+
+    public boolean containsProperties(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      return internalGetProperties().getMap().containsKey(key);
+    }
+    /**
+     * Use {@link #getPropertiesMap()} instead.
+     */
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String> getProperties() {
+      return getPropertiesMap();
+    }
+    /**
+     * <pre>
+     * Optional. A mapping of property names to values, used to configure Hadoop.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+     * classes in user code.
+     * </pre>
+     *
+     * <code>map&lt;string, string&gt; properties = 7;</code>
+     */
+
+    public java.util.Map<java.lang.String, java.lang.String> getPropertiesMap() {
+      return internalGetProperties().getMap();
+    }
+    /**
+     * <pre>
+     * Optional. A mapping of property names to values, used to configure Hadoop.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+     * classes in user code.
+     * </pre>
+     *
+     * <code>map&lt;string, string&gt; properties = 7;</code>
+     */
+
+    public java.lang.String getPropertiesOrDefault(
+        java.lang.String key,
+        java.lang.String defaultValue) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      java.util.Map<java.lang.String, java.lang.String> map =
+          internalGetProperties().getMap();
+      return map.containsKey(key) ? map.get(key) : defaultValue;
+    }
+    /**
+     * <pre>
+     * Optional. A mapping of property names to values, used to configure Hadoop.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+     * classes in user code.
+     * </pre>
+     *
+     * <code>map&lt;string, string&gt; properties = 7;</code>
+     */
+
+    public java.lang.String getPropertiesOrThrow(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      java.util.Map<java.lang.String, java.lang.String> map =
+          internalGetProperties().getMap();
+      if (!map.containsKey(key)) {
+        throw new java.lang.IllegalArgumentException();
+      }
+      return map.get(key);
+    }
+
+    public Builder clearProperties() {
+      internalGetMutableProperties().getMutableMap()
+          .clear();
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. A mapping of property names to values, used to configure Hadoop.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+     * classes in user code.
+     * </pre>
+     *
+     * <code>map&lt;string, string&gt; properties = 7;</code>
+     */
+
+    public Builder removeProperties(
+        java.lang.String key) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      internalGetMutableProperties().getMutableMap()
+          .remove(key);
+      return this;
+    }
+    /**
+     * Use alternate mutation accessors instead.
+     */
+    @java.lang.Deprecated
+    public java.util.Map<java.lang.String, java.lang.String>
+        getMutableProperties() {
+      return internalGetMutableProperties().getMutableMap();
+    }
+    /**
+     * <pre>
+     * Optional. A mapping of property names to values, used to configure Hadoop.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+     * classes in user code.
+     * </pre>
+     *
+     * <code>map&lt;string, string&gt; properties = 7;</code>
+     */
+    public Builder putProperties(
+        java.lang.String key,
+        java.lang.String value) {
+      if (key == null) { throw new java.lang.NullPointerException(); }
+      if (value == null) { throw new java.lang.NullPointerException(); }
+      internalGetMutableProperties().getMutableMap()
+          .put(key, value);
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. A mapping of property names to values, used to configure Hadoop.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+     * classes in user code.
+     * </pre>
+     *
+     * <code>map&lt;string, string&gt; properties = 7;</code>
+     */
+
+    public Builder putAllProperties(
+        java.util.Map<java.lang.String, java.lang.String> values) {
+      internalGetMutableProperties().getMutableMap()
+          .putAll(values);
+      return this;
+    }
+
+    private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_ = null;
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> loggingConfigBuilder_;
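[Editor's note: the logging_config accessors below follow protoc's standard message-field pattern. Assuming LoggingConfig (added elsewhere in this PR) exposes its driver_log_levels map with the same generated put* style as the properties map above, and that the accessor/enum names putDriverLogLevels and LoggingConfig.Level exist as guessed, driver logging could be tuned like this:]

    HadoopJob.Builder b = HadoopJob.newBuilder()
        .setLoggingConfig(LoggingConfig.newBuilder()
            .putDriverLogLevels("org.apache.hadoop", LoggingConfig.Level.DEBUG) // assumed names
            .build());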
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public boolean hasLoggingConfig() {
+      return loggingConfigBuilder_ != null || loggingConfig_ != null;
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() {
+      if (loggingConfigBuilder_ == null) {
+        return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_;
+      } else {
+        return loggingConfigBuilder_.getMessage();
+      }
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public Builder setLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) {
+      if (loggingConfigBuilder_ == null) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        loggingConfig_ = value;
+        onChanged();
+      } else {
+        loggingConfigBuilder_.setMessage(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public Builder setLoggingConfig(
+        com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder builderForValue) {
+      if (loggingConfigBuilder_ == null) {
+        loggingConfig_ = builderForValue.build();
+        onChanged();
+      } else {
+        loggingConfigBuilder_.setMessage(builderForValue.build());
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public Builder mergeLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) {
+      if (loggingConfigBuilder_ == null) {
+        if (loggingConfig_ != null) {
+          loggingConfig_ =
+            com.google.cloud.dataproc.v1beta2.LoggingConfig.newBuilder(loggingConfig_).mergeFrom(value).buildPartial();
+        } else {
+          loggingConfig_ = value;
+        }
+        onChanged();
+      } else {
+        loggingConfigBuilder_.mergeFrom(value);
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public Builder clearLoggingConfig() {
+      if (loggingConfigBuilder_ == null) {
+        loggingConfig_ = null;
+        onChanged();
+      } else {
+        loggingConfig_ = null;
+        loggingConfigBuilder_ = null;
+      }
+
+      return this;
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder getLoggingConfigBuilder() {
+
+      onChanged();
+      return getLoggingConfigFieldBuilder().getBuilder();
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() {
+      if (loggingConfigBuilder_ != null) {
+        return loggingConfigBuilder_.getMessageOrBuilder();
+      } else {
+        return loggingConfig_ == null ?
+            com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_;
+      }
+    }
+    /**
+     * <pre>
+     * Optional. The runtime log config for job execution.
+     * </pre>
+     *
+     * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> 
+        getLoggingConfigFieldBuilder() {
+      if (loggingConfigBuilder_ == null) {
+        loggingConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder>(
+                getLoggingConfig(),
+                getParentForChildren(),
+                isClean());
+        loggingConfig_ = null;
+      }
+      return loggingConfigBuilder_;
+    }
+    @java.lang.Override
+    public final Builder setUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.setUnknownFieldsProto3(unknownFields);
+    }
+
+    @java.lang.Override
+    public final Builder mergeUnknownFields(
+        final com.google.protobuf.UnknownFieldSet unknownFields) {
+      return super.mergeUnknownFields(unknownFields);
+    }
+
+
+    // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.HadoopJob)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.HadoopJob)
+  private static final com.google.cloud.dataproc.v1beta2.HadoopJob DEFAULT_INSTANCE;
+  static {
+    DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.HadoopJob();
+  }
+
+  public static com.google.cloud.dataproc.v1beta2.HadoopJob getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final com.google.protobuf.Parser<HadoopJob>
+      PARSER = new com.google.protobuf.AbstractParser<HadoopJob>() {
+    @java.lang.Override
+    public HadoopJob parsePartialFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return new HadoopJob(input, extensionRegistry);
+    }
+  };
+
+  public static com.google.protobuf.Parser<HadoopJob> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.protobuf.Parser<HadoopJob> getParserForType() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public com.google.cloud.dataproc.v1beta2.HadoopJob getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+
+}
+
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java
new file mode 100644
index 000000000000..384fc0016773
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java
@@ -0,0 +1,323 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/jobs.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+public interface HadoopJobOrBuilder extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.HadoopJob)
+    com.google.protobuf.MessageOrBuilder {
+
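[Editor's note: pulling the pieces of HadoopJob.java above together, a small end-to-end sketch. The setters follow protoc's standard naming for the fields documented in the interface below (the driver oneof is set via setMainClass or setMainJarFileUri); all values are hypothetical.]

    HadoopJob wordCount = HadoopJob.newBuilder()
        .setMainClass("org.apache.hadoop.examples.WordCount") // driver oneof: main_class
        .addJarFileUris("gs://my-bucket/wordcount-deps.jar")
        .addArgs("gs://my-bucket/input/")
        .addArgs("gs://my-bucket/output/")
        .build();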
+  /**
+   * <pre>
+   * The HCFS URI of the jar file containing the main class.
+   * Examples:
+   *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+   *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+   *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+   * </pre>
+   *
+   * <code>string main_jar_file_uri = 1;</code>
+   */
+  java.lang.String getMainJarFileUri();
+  /**
+   * <pre>
+   * The HCFS URI of the jar file containing the main class.
+   * Examples:
+   *     'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
+   *     'hdfs:/tmp/test-samples/custom-wordcount.jar'
+   *     'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+   * </pre>
+   *
+   * <code>string main_jar_file_uri = 1;</code>
+   */
+  com.google.protobuf.ByteString
+      getMainJarFileUriBytes();
+
+  /**
+   * <pre>
+   * The name of the driver's main class. The jar file containing the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * </pre>
+   *
+   * <code>string main_class = 2;</code>
+   */
+  java.lang.String getMainClass();
+  /**
+   * <pre>
+   * The name of the driver's main class. The jar file containing the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * </pre>
+   *
+   * <code>string main_class = 2;</code>
+   */
+  com.google.protobuf.ByteString
+      getMainClassBytes();
+
+  /**
+   * <pre>
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * </pre>
+   *
+   * <code>repeated string args = 3;</code>
+   */
+  java.util.List<java.lang.String>
+      getArgsList();
+  /**
+   * <pre>
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * </pre>
+   *
+   * <code>repeated string args = 3;</code>
+   */
+  int getArgsCount();
+  /**
+   * <pre>
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * </pre>
+   *
+   * <code>repeated string args = 3;</code>
+   */
+  java.lang.String getArgs(int index);
+  /**
+   * <pre>
+   * Optional. The arguments to pass to the driver. Do not
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
+   * properties, since a collision may occur that causes an incorrect job
+   * submission.
+   * </pre>
+   *
+   * <code>repeated string args = 3;</code>
+   */
+  com.google.protobuf.ByteString
+      getArgsBytes(int index);
+
+  /**
+   * <pre>
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 4;</code>
+   */
+  java.util.List<java.lang.String>
+      getJarFileUrisList();
+  /**
+   * <pre>
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 4;</code>
+   */
+  int getJarFileUrisCount();
+  /**
+   * <pre>
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 4;</code>
+   */
+  java.lang.String getJarFileUris(int index);
+  /**
+   * <pre>
+   * Optional. Jar file URIs to add to the CLASSPATHs of the
+   * Hadoop driver and tasks.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 4;</code>
+   */
+  com.google.protobuf.ByteString
+      getJarFileUrisBytes(int index);
+
+  /**
+   * <pre>
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * </pre>
+   *
+   * <code>repeated string file_uris = 5;</code>
+   */
+  java.util.List<java.lang.String>
+      getFileUrisList();
+  /**
+   * <pre>
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * </pre>
+   *
+   * <code>repeated string file_uris = 5;</code>
+   */
+  int getFileUrisCount();
+  /**
+   * <pre>
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * </pre>
+   *
+   * <code>repeated string file_uris = 5;</code>
+   */
+  java.lang.String getFileUris(int index);
+  /**
+   * <pre>
+   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
+   * to the working directory of Hadoop drivers and distributed tasks. Useful
+   * for naively parallel tasks.
+   * </pre>
+   *
+   * <code>repeated string file_uris = 5;</code>
+   */
+  com.google.protobuf.ByteString
+      getFileUrisBytes(int index);
+
+  /**
+   * <pre>
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * </pre>
+   *
+   * <code>repeated string archive_uris = 6;</code>
+   */
+  java.util.List<java.lang.String>
+      getArchiveUrisList();
+  /**
+   * <pre>
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * </pre>
+   *
+   * <code>repeated string archive_uris = 6;</code>
+   */
+  int getArchiveUrisCount();
+  /**
+   * <pre>
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * </pre>
+   *
+   * <code>repeated string archive_uris = 6;</code>
+   */
+  java.lang.String getArchiveUris(int index);
+  /**
+   * <pre>
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Hadoop drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, or .zip.
+   * </pre>
+   *
+   * <code>repeated string archive_uris = 6;</code>
+   */
+  com.google.protobuf.ByteString
+      getArchiveUrisBytes(int index);
+
+  /**
+   * <pre>
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 7;</code>
+   */
+  int getPropertiesCount();
+  /**
+   * <pre>
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 7;</code>
+   */
+  boolean containsProperties(
+      java.lang.String key);
+  /**
+   * Use {@link #getPropertiesMap()} instead.
+   */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.String>
+      getProperties();
+  /**
+   * <pre>
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 7;</code>
+   */
+  java.util.Map<java.lang.String, java.lang.String>
+      getPropertiesMap();
+  /**
+   * <pre>
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 7;</code>
+   */
+
+  java.lang.String getPropertiesOrDefault(
+      java.lang.String key,
+      java.lang.String defaultValue);
+  /**
+   * <pre>
+   * Optional. A mapping of property names to values, used to configure Hadoop.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site and
+   * classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 7;</code>
+   */
+
+  java.lang.String getPropertiesOrThrow(
+      java.lang.String key);
+
+  /**
+   * <pre>
+   * Optional. The runtime log config for job execution.
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+   */
+  boolean hasLoggingConfig();
+  /**
+   * <pre>
+   * Optional. The runtime log config for job execution.
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+   */
+  com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig();
+  /**
+   * <pre>
+   * Optional. The runtime log config for job execution.
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8;</code>
+   */
+  com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder();
+
+  public com.google.cloud.dataproc.v1beta2.HadoopJob.DriverCase getDriverCase();
+}
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java
new file mode 100644
index 000000000000..10526216de61
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java
@@ -0,0 +1,1912 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/jobs.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+/**
+ * <pre>
+ * A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
+ * queries on YARN.
+ * </pre>
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1beta2.HiveJob}
+ */
+public final class HiveJob extends
+    com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.HiveJob)
+    HiveJobOrBuilder {
+private static final long serialVersionUID = 0L;
+  // Use HiveJob.newBuilder() to construct.
+  private HiveJob(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private HiveJob() {
+    continueOnFailure_ = false;
+    jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+  }
+
+  @java.lang.Override
+  public final com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return this.unknownFields;
+  }
+  private HiveJob(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    if (extensionRegistry == null) {
+      throw new java.lang.NullPointerException();
+    }
+    int mutable_bitField0_ = 0;
+    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+        com.google.protobuf.UnknownFieldSet.newBuilder();
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            done = true;
+            break;
+          case 10: {
+            java.lang.String s = input.readStringRequireUtf8();
+            queriesCase_ = 1;
+            queries_ = s;
+            break;
+          }
+          case 18: {
+            com.google.cloud.dataproc.v1beta2.QueryList.Builder subBuilder = null;
+            if (queriesCase_ == 2) {
+              subBuilder = ((com.google.cloud.dataproc.v1beta2.QueryList) queries_).toBuilder();
+            }
+            queries_ =
+                input.readMessage(com.google.cloud.dataproc.v1beta2.QueryList.parser(), extensionRegistry);
+            if (subBuilder != null) {
+              subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.QueryList) queries_);
+              queries_ = subBuilder.buildPartial();
+            }
+            queriesCase_ = 2;
+            break;
+          }
+          case 24: {
+
+            continueOnFailure_ = input.readBool();
+            break;
+          }
+          case 34: {
+            if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+              scriptVariables_ = com.google.protobuf.MapField.newMapField(
+                  ScriptVariablesDefaultEntryHolder.defaultEntry);
+              mutable_bitField0_ |= 0x00000008;
+            }
+            com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
+            scriptVariables__ = input.readMessage(
+                ScriptVariablesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry);
+            scriptVariables_.getMutableMap().put(
+                scriptVariables__.getKey(), scriptVariables__.getValue());
+            break;
+          }
+          case 42: {
+            if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+              properties_ = com.google.protobuf.MapField.newMapField(
+                  PropertiesDefaultEntryHolder.defaultEntry);
+              mutable_bitField0_ |= 0x00000010;
+            }
+            com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
+            properties__ = input.readMessage(
+                PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry);
+            properties_.getMutableMap().put(
+                properties__.getKey(), properties__.getValue());
+            break;
+          }
+          case 50: {
+            java.lang.String s = input.readStringRequireUtf8();
+            if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+              jarFileUris_ = new com.google.protobuf.LazyStringArrayList();
+              mutable_bitField0_ |= 0x00000020;
+            }
+            jarFileUris_.add(s);
+            break;
+          }
+          default: {
+            if (!parseUnknownFieldProto3(
+                input, unknownFields, extensionRegistry, tag)) {
+              done = true;
+            }
+            break;
+          }
+        }
+      }
+    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+        jarFileUris_ = jarFileUris_.getUnmodifiableView();
+      }
+      this.unknownFields = unknownFields.build();
+      makeExtensionsImmutable();
+    }
+  }
+  public static final com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor;
+  }
+
+  @SuppressWarnings({"rawtypes"})
+  @java.lang.Override
+  protected com.google.protobuf.MapField internalGetMapField(
+      int number) {
+    switch (number) {
+      case 4:
+        return internalGetScriptVariables();
+      case 5:
+        return internalGetProperties();
+      default:
+        throw new RuntimeException(
+            "Invalid map field number: " + number);
+    }
+  }
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HiveJob_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.dataproc.v1beta2.HiveJob.class, com.google.cloud.dataproc.v1beta2.HiveJob.Builder.class);
+  }
+
+  private int bitField0_;
+  private int queriesCase_ = 0;
+  private java.lang.Object queries_;
+  public enum QueriesCase
+      implements com.google.protobuf.Internal.EnumLite {
+    QUERY_FILE_URI(1),
+    QUERY_LIST(2),
+    QUERIES_NOT_SET(0);
+    private final int value;
+    private QueriesCase(int value) {
+      this.value = value;
+    }
+    /**
+     * @deprecated Use {@link #forNumber(int)} instead.
+     */
+    @java.lang.Deprecated
+    public static QueriesCase valueOf(int value) {
+      return forNumber(value);
+    }
+
+    public static QueriesCase forNumber(int value) {
+      switch (value) {
+        case 1: return QUERY_FILE_URI;
+        case 2: return QUERY_LIST;
+        case 0: return QUERIES_NOT_SET;
+        default: return null;
+      }
+    }
+    public int getNumber() {
+      return this.value;
+    }
+  };
+
+  public QueriesCase
+  getQueriesCase() {
+    return QueriesCase.forNumber(
+        queriesCase_);
+  }
+
+  public static final int QUERY_FILE_URI_FIELD_NUMBER = 1;
+  /**
+   * <pre>
+   * The HCFS URI of the script that contains Hive queries.
+   * </pre>
+   *
+   * <code>string query_file_uri = 1;</code>
+   */
+  public java.lang.String getQueryFileUri() {
+    java.lang.Object ref = "";
+    if (queriesCase_ == 1) {
+      ref = queries_;
+    }
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      com.google.protobuf.ByteString bs =
+          (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      if (queriesCase_ == 1) {
+        queries_ = s;
+      }
+      return s;
+    }
+  }
+  /**
+   * <pre>
+   * The HCFS URI of the script that contains Hive queries.
+   * </pre>
+   *
+   * <code>string query_file_uri = 1;</code>
+   */
+  public com.google.protobuf.ByteString
+      getQueryFileUriBytes() {
+    java.lang.Object ref = "";
+    if (queriesCase_ == 1) {
+      ref = queries_;
+    }
+    if (ref instanceof java.lang.String) {
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      if (queriesCase_ == 1) {
+        queries_ = b;
+      }
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int QUERY_LIST_FIELD_NUMBER = 2;
+  /**
+   * <pre>
+   * A list of queries.
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.QueryList query_list = 2;</code>
+   */
+  public boolean hasQueryList() {
+    return queriesCase_ == 2;
+  }
+  /**
+   * <pre>
+   * A list of queries.
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.QueryList query_list = 2;</code>
+   */
+  public com.google.cloud.dataproc.v1beta2.QueryList getQueryList() {
+    if (queriesCase_ == 2) {
+       return (com.google.cloud.dataproc.v1beta2.QueryList) queries_;
+    }
+    return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance();
+  }
+  /**
+   * <pre>
+   * A list of queries.
+   * </pre>
+   *
+   * <code>.google.cloud.dataproc.v1beta2.QueryList query_list = 2;</code>
+   */
+  public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder() {
+    if (queriesCase_ == 2) {
+       return (com.google.cloud.dataproc.v1beta2.QueryList) queries_;
+    }
+    return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance();
+  }
+
+  public static final int CONTINUE_ON_FAILURE_FIELD_NUMBER = 3;
+  private boolean continueOnFailure_;
+  /**
+   * <pre>
+   * Optional. Whether to continue executing queries if a query fails.
+   * The default value is `false`. Setting to `true` can be useful when executing
+   * independent parallel queries.
+   * </pre>
+   *
+   * <code>bool continue_on_failure = 3;</code>
+   */
+  public boolean getContinueOnFailure() {
+    return continueOnFailure_;
+  }
+
+  public static final int SCRIPT_VARIABLES_FIELD_NUMBER = 4;
+  private static final class ScriptVariablesDefaultEntryHolder {
+    static final com.google.protobuf.MapEntry<
+        java.lang.String, java.lang.String> defaultEntry =
+            com.google.protobuf.MapEntry
+            .<java.lang.String, java.lang.String>newDefaultInstance(
+                com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HiveJob_ScriptVariablesEntry_descriptor,
+                com.google.protobuf.WireFormat.FieldType.STRING,
+                "",
+                com.google.protobuf.WireFormat.FieldType.STRING,
+                "");
+  }
+  private com.google.protobuf.MapField<
+      java.lang.String, java.lang.String> scriptVariables_;
+  private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+  internalGetScriptVariables() {
+    if (scriptVariables_ == null) {
+      return com.google.protobuf.MapField.emptyMapField(
+          ScriptVariablesDefaultEntryHolder.defaultEntry);
+    }
+    return scriptVariables_;
+  }
+
+  public int getScriptVariablesCount() {
+    return internalGetScriptVariables().getMap().size();
+  }
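[Editor's note: the queries oneof above accepts either a script URI or an inline QueryList. A sketch using the setters defined in the Builder later in this file, with hypothetical values; setContinueOnFailure is the standard generated setter for the bool field above.]

    HiveJob hiveJob = HiveJob.newBuilder()
        .setQueryFileUri("gs://my-bucket/queries.hql") // queries oneof, case QUERY_FILE_URI
        .setContinueOnFailure(true)                    // keep going if one query fails
        .build();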
+  /**
+   * <pre>
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; script_variables = 4;</code>
+   */
+
+  public boolean containsScriptVariables(
+      java.lang.String key) {
+    if (key == null) { throw new java.lang.NullPointerException(); }
+    return internalGetScriptVariables().getMap().containsKey(key);
+  }
+  /**
+   * Use {@link #getScriptVariablesMap()} instead.
+   */
+  @java.lang.Deprecated
+  public java.util.Map<java.lang.String, java.lang.String> getScriptVariables() {
+    return getScriptVariablesMap();
+  }
+  /**
+   * <pre>
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; script_variables = 4;</code>
+   */
+
+  public java.util.Map<java.lang.String, java.lang.String> getScriptVariablesMap() {
+    return internalGetScriptVariables().getMap();
+  }
+  /**
+   * <pre>
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; script_variables = 4;</code>
+   */
+
+  public java.lang.String getScriptVariablesOrDefault(
+      java.lang.String key,
+      java.lang.String defaultValue) {
+    if (key == null) { throw new java.lang.NullPointerException(); }
+    java.util.Map<java.lang.String, java.lang.String> map =
+        internalGetScriptVariables().getMap();
+    return map.containsKey(key) ? map.get(key) : defaultValue;
+  }
+  /**
+   * <pre>
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; script_variables = 4;</code>
+   */
+
+  public java.lang.String getScriptVariablesOrThrow(
+      java.lang.String key) {
+    if (key == null) { throw new java.lang.NullPointerException(); }
+    java.util.Map<java.lang.String, java.lang.String> map =
+        internalGetScriptVariables().getMap();
+    if (!map.containsKey(key)) {
+      throw new java.lang.IllegalArgumentException();
+    }
+    return map.get(key);
+  }
+
+  public static final int PROPERTIES_FIELD_NUMBER = 5;
+  private static final class PropertiesDefaultEntryHolder {
+    static final com.google.protobuf.MapEntry<
+        java.lang.String, java.lang.String> defaultEntry =
+            com.google.protobuf.MapEntry
+            .<java.lang.String, java.lang.String>newDefaultInstance(
+                com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HiveJob_PropertiesEntry_descriptor,
+                com.google.protobuf.WireFormat.FieldType.STRING,
+                "",
+                com.google.protobuf.WireFormat.FieldType.STRING,
+                "");
+  }
+  private com.google.protobuf.MapField<
+      java.lang.String, java.lang.String> properties_;
+  private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+  internalGetProperties() {
+    if (properties_ == null) {
+      return com.google.protobuf.MapField.emptyMapField(
+          PropertiesDefaultEntryHolder.defaultEntry);
+    }
+    return properties_;
+  }
+
+  public int getPropertiesCount() {
+    return internalGetProperties().getMap().size();
+  }
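[Editor's note: script_variables above are substituted as if issued via the Hive `SET` command. A sketch with a hypothetical variable, using putScriptVariables, the standard generated map setter defined in the Builder later in this file.]

    HiveJob parameterized = HiveJob.newBuilder()
        .setQueryFileUri("gs://my-bucket/daily-report.hql")
        .putScriptVariables("run_date", "2018-08-01") // equivalent to: SET run_date="2018-08-01";
        .build();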
+  /**
+   * <pre>
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 5;</code>
+   */
+
+  public boolean containsProperties(
+      java.lang.String key) {
+    if (key == null) { throw new java.lang.NullPointerException(); }
+    return internalGetProperties().getMap().containsKey(key);
+  }
+  /**
+   * Use {@link #getPropertiesMap()} instead.
+   */
+  @java.lang.Deprecated
+  public java.util.Map<java.lang.String, java.lang.String> getProperties() {
+    return getPropertiesMap();
+  }
+  /**
+   * <pre>
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 5;</code>
+   */
+
+  public java.util.Map<java.lang.String, java.lang.String> getPropertiesMap() {
+    return internalGetProperties().getMap();
+  }
+  /**
+   * <pre>
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 5;</code>
+   */
+
+  public java.lang.String getPropertiesOrDefault(
+      java.lang.String key,
+      java.lang.String defaultValue) {
+    if (key == null) { throw new java.lang.NullPointerException(); }
+    java.util.Map<java.lang.String, java.lang.String> map =
+        internalGetProperties().getMap();
+    return map.containsKey(key) ? map.get(key) : defaultValue;
+  }
+  /**
+   * <pre>
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * </pre>
+   *
+   * <code>map&lt;string, string&gt; properties = 5;</code>
+   */
+
+  public java.lang.String getPropertiesOrThrow(
+      java.lang.String key) {
+    if (key == null) { throw new java.lang.NullPointerException(); }
+    java.util.Map<java.lang.String, java.lang.String> map =
+        internalGetProperties().getMap();
+    if (!map.containsKey(key)) {
+      throw new java.lang.IllegalArgumentException();
+    }
+    return map.get(key);
+  }
+
+  public static final int JAR_FILE_URIS_FIELD_NUMBER = 6;
+  private com.google.protobuf.LazyStringList jarFileUris_;
+  /**
+   * <pre>
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 6;</code>
+   */
+  public com.google.protobuf.ProtocolStringList
+      getJarFileUrisList() {
+    return jarFileUris_;
+  }
+  /**
+   * <pre>
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 6;</code>
+   */
+  public int getJarFileUrisCount() {
+    return jarFileUris_.size();
+  }
+  /**
+   * <pre>
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 6;</code>
+   */
+  public java.lang.String getJarFileUris(int index) {
+    return jarFileUris_.get(index);
+  }
+  /**
+   * <pre>
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * </pre>
+   *
+   * <code>repeated string jar_file_uris = 6;</code>
+   */
+  public com.google.protobuf.ByteString
+      getJarFileUrisBytes(int index) {
+    return jarFileUris_.getByteString(index);
+  }
+
+  private byte memoizedIsInitialized = -1;
+  @java.lang.Override
+  public final boolean isInitialized() {
+    byte isInitialized = memoizedIsInitialized;
+    if (isInitialized == 1) return true;
+    if (isInitialized == 0) return false;
+
+    memoizedIsInitialized = 1;
+    return true;
+  }
+
+  @java.lang.Override
+  public void writeTo(com.google.protobuf.CodedOutputStream output)
+      throws java.io.IOException {
+    if (queriesCase_ == 1) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, queries_);
+    }
+    if (queriesCase_ == 2) {
+      output.writeMessage(2, (com.google.cloud.dataproc.v1beta2.QueryList) queries_);
+    }
+    if (continueOnFailure_ != false) {
+      output.writeBool(3, continueOnFailure_);
+    }
+    com.google.protobuf.GeneratedMessageV3
+        .serializeStringMapTo(
+            output,
+            internalGetScriptVariables(),
+            ScriptVariablesDefaultEntryHolder.defaultEntry,
+            4);
+    com.google.protobuf.GeneratedMessageV3
+        .serializeStringMapTo(
+            output,
+            internalGetProperties(),
+            PropertiesDefaultEntryHolder.defaultEntry,
+            5);
+    for (int i = 0; i < jarFileUris_.size(); i++) {
+      com.google.protobuf.GeneratedMessageV3.writeString(output, 6, jarFileUris_.getRaw(i));
+    }
+    unknownFields.writeTo(output);
+  }
+
+  @java.lang.Override
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    if (queriesCase_ == 1) {
+      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, queries_);
+    }
+    if (queriesCase_ == 2) {
+      size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, (com.google.cloud.dataproc.v1beta2.QueryList) queries_);
+    }
+    if (continueOnFailure_ != false) {
+      size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(3, continueOnFailure_);
+    }
+    for (java.util.Map.Entry<java.lang.String, java.lang.String> entry
+        : internalGetScriptVariables().getMap().entrySet()) {
+      com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
+      scriptVariables__ = ScriptVariablesDefaultEntryHolder.defaultEntry.newBuilderForType()
+          .setKey(entry.getKey())
+          .setValue(entry.getValue())
+          .build();
+      size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(4, scriptVariables__);
+    }
+    for (java.util.Map.Entry<java.lang.String, java.lang.String> entry
+        : internalGetProperties().getMap().entrySet()) {
+      com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
+      properties__ = PropertiesDefaultEntryHolder.defaultEntry.newBuilderForType()
+          .setKey(entry.getKey())
+          .setValue(entry.getValue())
+          .build();
+      size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(5, properties__);
+    }
+    {
+      int dataSize = 0;
+      for (int i = 0; i < jarFileUris_.size(); i++) {
+        dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i));
+      }
+      size += dataSize;
+      size += 1 * getJarFileUrisList().size();
+    }
+    size += unknownFields.getSerializedSize();
+    memoizedSize = size;
+    return size;
+  }
+
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+    if (obj == this) {
+     return true;
+    }
+    if (!(obj instanceof com.google.cloud.dataproc.v1beta2.HiveJob)) {
+      return super.equals(obj);
+    }
+    com.google.cloud.dataproc.v1beta2.HiveJob other = (com.google.cloud.dataproc.v1beta2.HiveJob) obj;
+
+    boolean result = true;
+    result = result && (getContinueOnFailure()
+        == other.getContinueOnFailure());
+    result = result && internalGetScriptVariables().equals(
+        other.internalGetScriptVariables());
+    result = result && internalGetProperties().equals(
+        other.internalGetProperties());
+    result = result && getJarFileUrisList()
+        .equals(other.getJarFileUrisList());
+    result = result && getQueriesCase().equals(
+        other.getQueriesCase());
+    if (!result) return false;
+    switch (queriesCase_) {
+      case 1:
+        result = result && getQueryFileUri()
+            .equals(other.getQueryFileUri());
+        break;
+      case 2:
+        result = result && getQueryList()
+            .equals(other.getQueryList());
+        break;
+      case 0:
+      default:
+    }
+    result = result && unknownFields.equals(other.unknownFields);
+    return result;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+    if (memoizedHashCode != 0) {
+      return memoizedHashCode;
+    }
+    int hash = 41;
+    hash = (19 * hash) + getDescriptor().hashCode();
+    hash = (37 * hash) + CONTINUE_ON_FAILURE_FIELD_NUMBER;
+    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
+        getContinueOnFailure());
+    if (!internalGetScriptVariables().getMap().isEmpty()) {
+      hash = (37 * hash) + SCRIPT_VARIABLES_FIELD_NUMBER;
+      hash = (53 * hash) + internalGetScriptVariables().hashCode();
+    }
+    if (!internalGetProperties().getMap().isEmpty()) {
+      hash = (37 * hash) + PROPERTIES_FIELD_NUMBER;
+      hash = (53 * hash) + internalGetProperties().hashCode();
+    }
+    if (getJarFileUrisCount() > 0) {
+      hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER;
+      hash = (53 * hash) + getJarFileUrisList().hashCode();
+    }
+    switch (queriesCase_) {
+      case 1:
+        hash = (37 * hash) + QUERY_FILE_URI_FIELD_NUMBER;
+        hash = (53 * hash) + getQueryFileUri().hashCode();
+        break;
+      case 2:
+        hash = (37 * hash) + QUERY_LIST_FIELD_NUMBER;
+        hash = (53 * hash) + getQueryList().hashCode();
+        break;
+      case 0:
+      default:
+    }
+    hash = (29 * hash) + unknownFields.hashCode();
+    memoizedHashCode = hash;
+    return hash;
+  }
+
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      java.nio.ByteBuffer data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      java.nio.ByteBuffer data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      com.google.protobuf.ByteString data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      com.google.protobuf.ByteString data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(byte[] data)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      byte[] data,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      java.io.InputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseDelimitedFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseDelimitedFrom(
+      java.io.InputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      com.google.protobuf.CodedInputStream input)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static com.google.cloud.dataproc.v1beta2.HiveJob parseFrom(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+
+  @java.lang.Override
+  public Builder newBuilderForType() { return newBuilder(); }
+  public static Builder newBuilder() {
+    return DEFAULT_INSTANCE.toBuilder();
+  }
+  public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.HiveJob prototype) {
+    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+  }
+  @java.lang.Override
+  public Builder toBuilder() {
+    return this == DEFAULT_INSTANCE
+        ? new Builder() : new Builder().mergeFrom(this);
+  }
+
+  @java.lang.Override
+  protected Builder newBuilderForType(
+      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+    Builder builder = new Builder(parent);
+    return builder;
+  }
+  /**
+   * <pre>
+   * A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
+   * queries on YARN.
+   * </pre>
+   *
+   * Protobuf type {@code google.cloud.dataproc.v1beta2.HiveJob}
+   */
+  public static final class Builder extends
+      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+      // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.HiveJob)
+      com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder {
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor;
+    }
+
+    @SuppressWarnings({"rawtypes"})
+    protected com.google.protobuf.MapField internalGetMapField(
+        int number) {
+      switch (number) {
+        case 4:
+          return internalGetScriptVariables();
+        case 5:
+          return internalGetProperties();
+        default:
+          throw new RuntimeException(
+              "Invalid map field number: " + number);
+      }
+    }
+    @SuppressWarnings({"rawtypes"})
+    protected com.google.protobuf.MapField internalGetMutableMapField(
+        int number) {
+      switch (number) {
+        case 4:
+          return internalGetMutableScriptVariables();
+        case 5:
+          return internalGetMutableProperties();
+        default:
+          throw new RuntimeException(
+              "Invalid map field number: " + number);
+      }
+    }
+    @java.lang.Override
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HiveJob_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              com.google.cloud.dataproc.v1beta2.HiveJob.class, com.google.cloud.dataproc.v1beta2.HiveJob.Builder.class);
+    }
+
+    // Construct using com.google.cloud.dataproc.v1beta2.HiveJob.newBuilder()
+    private Builder() {
+      maybeForceBuilderInitialization();
+    }
+
+    private Builder(
+        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      super(parent);
+      maybeForceBuilderInitialization();
+    }
+    private void maybeForceBuilderInitialization() {
+      if (com.google.protobuf.GeneratedMessageV3
+              .alwaysUseFieldBuilders) {
+      }
+    }
+    @java.lang.Override
+    public Builder clear() {
+      super.clear();
+      continueOnFailure_ = false;
+
+      internalGetMutableScriptVariables().clear();
+      internalGetMutableProperties().clear();
+      jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      bitField0_ = (bitField0_ & ~0x00000020);
+      queriesCase_ = 0;
+      queries_ = null;
+      return this;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Descriptors.Descriptor
+        getDescriptorForType() {
+      return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.dataproc.v1beta2.HiveJob getDefaultInstanceForType() {
+      return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance();
+    }
+
+    @java.lang.Override
+    public com.google.cloud.dataproc.v1beta2.HiveJob build() {
+      com.google.cloud.dataproc.v1beta2.HiveJob result = buildPartial();
+      if (!result.isInitialized()) {
+        throw newUninitializedMessageException(result);
+      }
+      return result;
+    }
+
+    @java.lang.Override
+    public com.google.cloud.dataproc.v1beta2.HiveJob buildPartial() {
+      com.google.cloud.dataproc.v1beta2.HiveJob result = new com.google.cloud.dataproc.v1beta2.HiveJob(this);
+      int from_bitField0_ = bitField0_;
+      int to_bitField0_ = 0;
+      if (queriesCase_ == 1) {
+        result.queries_ = queries_;
+      }
+      if (queriesCase_ == 2) {
+        if (queryListBuilder_ == null) {
+          result.queries_ = queries_;
+        } else {
+          result.queries_ = queryListBuilder_.build();
+        }
+      }
+      result.continueOnFailure_ = continueOnFailure_;
+      result.scriptVariables_ = internalGetScriptVariables();
+      result.scriptVariables_.makeImmutable();
+      result.properties_ = internalGetProperties();
+      result.properties_.makeImmutable();
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        jarFileUris_ = jarFileUris_.getUnmodifiableView();
+        bitField0_ = (bitField0_ & ~0x00000020);
+      }
+      result.jarFileUris_ = jarFileUris_;
+      result.bitField0_ = to_bitField0_;
+      result.queriesCase_ = queriesCase_;
+      onBuilt();
+      return result;
+    }
+
+    @java.lang.Override
+    public Builder clone() {
+      return (Builder) super.clone();
+    }
+    @java.lang.Override
+    public Builder setField(
+        com.google.protobuf.Descriptors.FieldDescriptor field,
+        java.lang.Object value) {
+      return (Builder) super.setField(field, value);
+    }
+    @java.lang.Override
+    public Builder clearField(
+        com.google.protobuf.Descriptors.FieldDescriptor field) {
+      return (Builder) super.clearField(field);
+    }
+    @java.lang.Override
+    public Builder clearOneof(
+        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+      return (Builder) super.clearOneof(oneof);
+    }
+    @java.lang.Override
+    public Builder setRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field,
+        int index, java.lang.Object value) {
+      return (Builder) super.setRepeatedField(field, index, value);
+    }
+    @java.lang.Override
+    public Builder addRepeatedField(
+        com.google.protobuf.Descriptors.FieldDescriptor field,
+        java.lang.Object value) {
+      return (Builder) super.addRepeatedField(field, value);
+    }
+    @java.lang.Override
+    public Builder mergeFrom(com.google.protobuf.Message other) {
+      if (other instanceof com.google.cloud.dataproc.v1beta2.HiveJob) {
+        return mergeFrom((com.google.cloud.dataproc.v1beta2.HiveJob)other);
+      } else {
+        super.mergeFrom(other);
+        return this;
+      }
+    }
+
+    public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.HiveJob other) {
+      if (other == com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance()) return this;
+      if (other.getContinueOnFailure() != false) {
+        setContinueOnFailure(other.getContinueOnFailure());
+      }
+      internalGetMutableScriptVariables().mergeFrom(
+          other.internalGetScriptVariables());
+      internalGetMutableProperties().mergeFrom(
+          other.internalGetProperties());
+      if (!other.jarFileUris_.isEmpty()) {
+        if (jarFileUris_.isEmpty()) {
+          jarFileUris_ = other.jarFileUris_;
+          bitField0_ = (bitField0_ & ~0x00000020);
+        } else {
+          ensureJarFileUrisIsMutable();
+          jarFileUris_.addAll(other.jarFileUris_);
+        }
+        onChanged();
+      }
+      switch (other.getQueriesCase()) {
+        case QUERY_FILE_URI: {
+          queriesCase_ = 1;
+          queries_ = other.queries_;
+          onChanged();
+          break;
+        }
+        case QUERY_LIST: {
+          mergeQueryList(other.getQueryList());
+          break;
+        }
+        case QUERIES_NOT_SET: {
+          break;
+        }
+      }
+      this.mergeUnknownFields(other.unknownFields);
+      onChanged();
+      return this;
+    }
+
+    @java.lang.Override
+    public final boolean isInitialized() {
+      return true;
+    }
+
+    @java.lang.Override
+    public Builder mergeFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      com.google.cloud.dataproc.v1beta2.HiveJob parsedMessage = null;
+      try {
+        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        parsedMessage = (com.google.cloud.dataproc.v1beta2.HiveJob) e.getUnfinishedMessage();
+        throw e.unwrapIOException();
+      } finally {
+        if (parsedMessage != null) {
+          mergeFrom(parsedMessage);
+        }
+      }
+      return this;
+    }
+    private int queriesCase_ = 0;
+    private java.lang.Object queries_;
+    public QueriesCase
+        getQueriesCase() {
+      return QueriesCase.forNumber(
+          queriesCase_);
+    }
+
+    public Builder clearQueries() {
+      queriesCase_ = 0;
+      queries_ = null;
+      onChanged();
+      return this;
+    }
+
+    private int bitField0_;
+
+    /**
+     * <pre>
+     * The HCFS URI of the script that contains Hive queries.
+     * 
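+     * A minimal construction sketch (the bucket and script path below are
+     * assumptions, not values from the source):
+     *   HiveJob job = HiveJob.newBuilder()
+     *       .setQueryFileUri("gs://my-bucket/hive/report.q") // assumed HCFS URI
+     *       .build();
+     * Because `queries` is a oneof, setting this field clears any query_list.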
+ * + * string query_file_uri = 1; + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (queriesCase_ == 1) { + queries_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The HCFS URI of the script that contains Hive queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public com.google.protobuf.ByteString + getQueryFileUriBytes() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (queriesCase_ == 1) { + queries_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The HCFS URI of the script that contains Hive queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder setQueryFileUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + /** + *
+     * The HCFS URI of the script that contains Hive queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder clearQueryFileUri() { + if (queriesCase_ == 1) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The HCFS URI of the script that contains Hive queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder setQueryFileUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder> queryListBuilder_; + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public boolean hasQueryList() { + return queriesCase_ == 2; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList getQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } else { + if (queriesCase_ == 2) { + return queryListBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + } + /** + *
+     * A list of queries.
+     * 
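+     * A sketch of supplying inline queries instead of a script file (the
+     * query strings are assumptions):
+     *   HiveJob job = HiveJob.newBuilder()
+     *       .setQueryList(QueryList.newBuilder()
+     *           .addQueries("SHOW DATABASES;") // assumed query text
+     *           .addQueries("SELECT 1;")
+     *           .build())
+     *       .build();
+     * Setting a query list switches the `queries` oneof away from
+     * query_file_uri.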
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder setQueryList(com.google.cloud.dataproc.v1beta2.QueryList value) { + if (queryListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queries_ = value; + onChanged(); + } else { + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder setQueryList( + com.google.cloud.dataproc.v1beta2.QueryList.Builder builderForValue) { + if (queryListBuilder_ == null) { + queries_ = builderForValue.build(); + onChanged(); + } else { + queryListBuilder_.setMessage(builderForValue.build()); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder mergeQueryList(com.google.cloud.dataproc.v1beta2.QueryList value) { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2 && + queries_ != com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance()) { + queries_ = com.google.cloud.dataproc.v1beta2.QueryList.newBuilder((com.google.cloud.dataproc.v1beta2.QueryList) queries_) + .mergeFrom(value).buildPartial(); + } else { + queries_ = value; + } + onChanged(); + } else { + if (queriesCase_ == 2) { + queryListBuilder_.mergeFrom(value); + } + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder clearQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + } else { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + } + queryListBuilder_.clear(); + } + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList.Builder getQueryListBuilder() { + return getQueryListFieldBuilder().getBuilder(); + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder() { + if ((queriesCase_ == 2) && (queryListBuilder_ != null)) { + return queryListBuilder_.getMessageOrBuilder(); + } else { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder> + getQueryListFieldBuilder() { + if (queryListBuilder_ == null) { + if (!(queriesCase_ == 2)) { + queries_ = com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + queryListBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder>( + (com.google.cloud.dataproc.v1beta2.QueryList) queries_, + getParentForChildren(), + isClean()); + queries_ = null; + } + queriesCase_ = 2; + onChanged();; + return queryListBuilder_; + } + + private boolean continueOnFailure_ ; + /** + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when executing
+     * independent parallel queries.
+     * 
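+     * A one-line sketch (the `true` value is illustrative, not from the
+     * source):
+     *   HiveJob.newBuilder().setContinueOnFailure(true);
+     * With this set, remaining queries keep running after one fails.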
+ * + * bool continue_on_failure = 3; + */ + public boolean getContinueOnFailure() { + return continueOnFailure_; + } + /** + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when executing
+     * independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3; + */ + public Builder setContinueOnFailure(boolean value) { + + continueOnFailure_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when executing
+     * independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3; + */ + public Builder clearContinueOnFailure() { + + continueOnFailure_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> scriptVariables_; + private com.google.protobuf.MapField + internalGetScriptVariables() { + if (scriptVariables_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + return scriptVariables_; + } + private com.google.protobuf.MapField + internalGetMutableScriptVariables() { + onChanged();; + if (scriptVariables_ == null) { + scriptVariables_ = com.google.protobuf.MapField.newMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + if (!scriptVariables_.isMutable()) { + scriptVariables_ = scriptVariables_.copy(); + } + return scriptVariables_; + } + + public int getScriptVariablesCount() { + return internalGetScriptVariables().getMap().size(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Hive command: `SET name="value";`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public boolean containsScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetScriptVariables().getMap().containsKey(key); + } + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getScriptVariables() { + return getScriptVariablesMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Hive command: `SET name="value";`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.util.Map getScriptVariablesMap() { + return internalGetScriptVariables().getMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Hive command: `SET name="value";`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Hive command: `SET name="value";`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.lang.String getScriptVariablesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearScriptVariables() { + internalGetMutableScriptVariables().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Hive command: `SET name="value";`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public Builder removeScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableScriptVariables().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableScriptVariables() { + return internalGetMutableScriptVariables().getMutableMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Hive command: `SET name="value";`).
+     * 
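+     * A substitution sketch (the variable name and value are assumptions):
+     *   HiveJob.newBuilder()
+     *       .putScriptVariables("env", "prod"); // behaves like: SET env="prod";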
+ * + * map<string, string> script_variables = 4; + */ + public Builder putScriptVariables( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableScriptVariables().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Hive command: `SET name="value";`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public Builder putAllScriptVariables( + java.util.Map values) { + internalGetMutableScriptVariables().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged();; + if (properties_ == null) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+     * Optional. A mapping of property names and values, used to configure Hive.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/hive/conf/hive-site.xml, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+     * Optional. A mapping of property names and values, used to configure Hive.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/hive/conf/hive-site.xml, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+     * Optional. A mapping of property names and values, used to configure Hive.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/hive/conf/hive-site.xml, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. A mapping of property names and values, used to configure Hive.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/hive/conf/hive-site.xml, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. A mapping of property names and values, used to configure Hive.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/hive/conf/hive-site.xml, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public Builder removeProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + *
+     * Optional. A mapping of property names and values, used to configure Hive.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/hive/conf/hive-site.xml, and classes in user code.
+     * 
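+     * A sketch overriding one Hive setting (the property key and value are
+     * assumptions):
+     *   HiveJob.newBuilder()
+     *       .putProperties("hive.exec.dynamic.partition", "true");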
+ * + * map<string, string> properties = 5; + */ + public Builder putProperties( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. A mapping of property names and values, used to configure Hive.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/hive/conf/hive-site.xml, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public Builder putAllProperties( + java.util.Map values) { + internalGetMutableProperties().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000020; + } + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder setJarFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
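+     * A sketch adding a user jar (the bucket and jar name are assumptions):
+     *   HiveJob.newBuilder()
+     *       .addJarFileUris("gs://my-bucket/lib/custom-serde.jar");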
+ * + * repeated string jar_file_uris = 6; + */ + public Builder addJarFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder addAllJarFileUris( + java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jarFileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+     * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+     * and UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder addJarFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.HiveJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.HiveJob) + private static final com.google.cloud.dataproc.v1beta2.HiveJob DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.HiveJob(); + } + + public static com.google.cloud.dataproc.v1beta2.HiveJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HiveJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new HiveJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.HiveJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java new file mode 100644 index 000000000000..d90e8a3e04b5 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java @@ -0,0 +1,236 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface HiveJobOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.HiveJob) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The HCFS URI of the script that contains Hive queries.
+   * 
+ * + * string query_file_uri = 1; + */ + java.lang.String getQueryFileUri(); + /** + *
+   * The HCFS URI of the script that contains Hive queries.
+   * 
+ * + * string query_file_uri = 1; + */ + com.google.protobuf.ByteString + getQueryFileUriBytes(); + + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + boolean hasQueryList(); + /** + *
+   * A list of queries.
+   * 
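+   * A reading sketch over the `queries` oneof (assumes `job` is a populated
+   * HiveJob):
+   *   if (job.hasQueryList()) {
+   *     for (String q : job.getQueryList().getQueriesList()) {
+   *       System.out.println(q); // print each inline query
+   *     }
+   *   }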
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + com.google.cloud.dataproc.v1beta2.QueryList getQueryList(); + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder(); + + /** + *
+   * Optional. Whether to continue executing queries if a query fails.
+   * The default value is `false`. Setting to `true` can be useful when executing
+   * independent parallel queries.
+   * 
+ * + * bool continue_on_failure = 3; + */ + boolean getContinueOnFailure(); + + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + int getScriptVariablesCount(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + boolean containsScriptVariables( + java.lang.String key); + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getScriptVariables(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + java.util.Map + getScriptVariablesMap(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Hive command: `SET name="value";`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + java.lang.String getScriptVariablesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + int getPropertiesCount(); + /** + *
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + boolean containsProperties( + java.lang.String key); + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getProperties(); + /** + *
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + java.util.Map + getPropertiesMap(); + /** + *
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + + java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. A mapping of property names and values, used to configure Hive.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/hive/conf/hive-site.xml, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + + java.lang.String getPropertiesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + java.util.List + getJarFileUrisList(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + int getJarFileUrisCount(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + java.lang.String getJarFileUris(int index); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+   * Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+   * and UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + com.google.protobuf.ByteString + getJarFileUrisBytes(int index); + + public com.google.cloud.dataproc.v1beta2.HiveJob.QueriesCase getQueriesCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java new file mode 100644 index 000000000000..b1fe652db26d --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java @@ -0,0 +1,2345 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Optional. The config settings for Compute Engine resources in
+ * an instance group, such as a master or worker group.
+ * 
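+ * A construction sketch for a worker group (the machine type and count are
+ * assumptions):
+ *   InstanceGroupConfig workers = InstanceGroupConfig.newBuilder()
+ *       .setNumInstances(2)                 // master groups must use 1
+ *       .setMachineTypeUri("n1-standard-2") // short name form
+ *       .build();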
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.InstanceGroupConfig} + */ +public final class InstanceGroupConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.InstanceGroupConfig) + InstanceGroupConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use InstanceGroupConfig.newBuilder() to construct. + private InstanceGroupConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private InstanceGroupConfig() { + numInstances_ = 0; + instanceNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + imageUri_ = ""; + machineTypeUri_ = ""; + isPreemptible_ = false; + accelerators_ = java.util.Collections.emptyList(); + minCpuPlatform_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private InstanceGroupConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + numInstances_ = input.readInt32(); + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + instanceNames_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + instanceNames_.add(s); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + imageUri_ = s; + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + + machineTypeUri_ = s; + break; + } + case 42: { + com.google.cloud.dataproc.v1beta2.DiskConfig.Builder subBuilder = null; + if (diskConfig_ != null) { + subBuilder = diskConfig_.toBuilder(); + } + diskConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.DiskConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(diskConfig_); + diskConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 48: { + + isPreemptible_ = input.readBool(); + break; + } + case 58: { + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder subBuilder = null; + if (managedGroupConfig_ != null) { + subBuilder = managedGroupConfig_.toBuilder(); + } + managedGroupConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(managedGroupConfig_); + managedGroupConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + accelerators_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + accelerators_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.AcceleratorConfig.parser(), extensionRegistry)); + break; + } + case 74: { + java.lang.String s = input.readStringRequireUtf8(); + + minCpuPlatform_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + instanceNames_ = instanceNames_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + accelerators_ = java.util.Collections.unmodifiableList(accelerators_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.class, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder.class); + } + + private int bitField0_; + public static final int NUM_INSTANCES_FIELD_NUMBER = 1; + private int numInstances_; + /** + *
+   * Optional. The number of VM instances in the instance group.
+   * For master instance groups, must be set to 1.
+   * 
+ * + * int32 num_instances = 1; + */ + public int getNumInstances() { + return numInstances_; + } + + public static final int INSTANCE_NAMES_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList instanceNames_; + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + public com.google.protobuf.ProtocolStringList + getInstanceNamesList() { + return instanceNames_; + } + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + public int getInstanceNamesCount() { + return instanceNames_.size(); + } + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + public java.lang.String getInstanceNames(int index) { + return instanceNames_.get(index); + } + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + public com.google.protobuf.ByteString + getInstanceNamesBytes(int index) { + return instanceNames_.getByteString(index); + } + + public static final int IMAGE_URI_FIELD_NUMBER = 3; + private volatile java.lang.Object imageUri_; + /** + *
+   * Output only. The Compute Engine image resource used for cluster
+   * instances. Inferred from `SoftwareConfig.image_version`.
+   * 
+ * + * string image_uri = 3; + */ + public java.lang.String getImageUri() { + java.lang.Object ref = imageUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + imageUri_ = s; + return s; + } + } + /** + *
+   * Output only. The Compute Engine image resource used for cluster
+   * instances. Inferred from `SoftwareConfig.image_version`.
+   * 
+ * + * string image_uri = 3; + */ + public com.google.protobuf.ByteString + getImageUriBytes() { + java.lang.Object ref = imageUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + imageUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MACHINE_TYPE_URI_FIELD_NUMBER = 4; + private volatile java.lang.Object machineTypeUri_; + /** + *
+   * Optional. The Compute Engine machine type used for cluster instances.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `n1-standard-2`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the machine type
+   * resource, for example, `n1-standard-2`.
+   * 
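+   * Equivalent setter sketches (assumes `builder` is an
+   * InstanceGroupConfig.Builder; the project and zone are placeholders):
+   *   builder.setMachineTypeUri("n1-standard-2");
+   *   builder.setMachineTypeUri(
+   *       "projects/my-project/zones/us-east1-a/machineTypes/n1-standard-2");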
+ * + * string machine_type_uri = 4; + */ + public java.lang.String getMachineTypeUri() { + java.lang.Object ref = machineTypeUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + machineTypeUri_ = s; + return s; + } + } + /** + *
+   * Optional. The Compute Engine machine type used for cluster instances.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `n1-standard-2`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the machine type
+   * resource, for example, `n1-standard-2`.
+   * 
+ * + * string machine_type_uri = 4; + */ + public com.google.protobuf.ByteString + getMachineTypeUriBytes() { + java.lang.Object ref = machineTypeUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + machineTypeUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DISK_CONFIG_FIELD_NUMBER = 5; + private com.google.cloud.dataproc.v1beta2.DiskConfig diskConfig_; + /** + *
+   * Optional. Disk option config settings.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public boolean hasDiskConfig() { + return diskConfig_ != null; + } + /** + *
+   * Optional. Disk option config settings.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public com.google.cloud.dataproc.v1beta2.DiskConfig getDiskConfig() { + return diskConfig_ == null ? com.google.cloud.dataproc.v1beta2.DiskConfig.getDefaultInstance() : diskConfig_; + } + /** + *
+   * Optional. Disk option config settings.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder getDiskConfigOrBuilder() { + return getDiskConfig(); + } + + public static final int IS_PREEMPTIBLE_FIELD_NUMBER = 6; + private boolean isPreemptible_; + /** + *
+   * Optional. Specifies that this instance group contains preemptible instances.
+   * 
+ * + * bool is_preemptible = 6; + */ + public boolean getIsPreemptible() { + return isPreemptible_; + } + + public static final int MANAGED_GROUP_CONFIG_FIELD_NUMBER = 7; + private com.google.cloud.dataproc.v1beta2.ManagedGroupConfig managedGroupConfig_; + /** + *
+   * Output only. The config for the Compute Engine Instance Group
+   * Manager that manages this group.
+   * This is only used for preemptible instance groups.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public boolean hasManagedGroupConfig() { + return managedGroupConfig_ != null; + } + /** + *
+   * Output only. The config for the Compute Engine Instance Group
+   * Manager that manages this group.
+   * This is only used for preemptible instance groups.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfig getManagedGroupConfig() { + return managedGroupConfig_ == null ? com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.getDefaultInstance() : managedGroupConfig_; + } + /** + *
+   * Output only. The config for the Compute Engine Instance Group
+   * Manager that manages this group.
+   * This is only used for preemptible instance groups.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfigOrBuilder getManagedGroupConfigOrBuilder() { + return getManagedGroupConfig(); + } + + public static final int ACCELERATORS_FIELD_NUMBER = 8; + private java.util.List accelerators_; + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
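+   * A sketch attaching one GPU per instance (the type URI is an assumption,
+   * and `builder` is assumed to be an InstanceGroupConfig.Builder):
+   *   builder.addAccelerators(AcceleratorConfig.newBuilder()
+   *       .setAcceleratorTypeUri("nvidia-tesla-k80") // assumed accelerator type
+   *       .setAcceleratorCount(1));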
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public java.util.List getAcceleratorsList() { + return accelerators_; + } + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public java.util.List + getAcceleratorsOrBuilderList() { + return accelerators_; + } + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public int getAcceleratorsCount() { + return accelerators_.size(); + } + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig getAccelerators(int index) { + return accelerators_.get(index); + } + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public com.google.cloud.dataproc.v1beta2.AcceleratorConfigOrBuilder getAcceleratorsOrBuilder( + int index) { + return accelerators_.get(index); + } + + public static final int MIN_CPU_PLATFORM_FIELD_NUMBER = 9; + private volatile java.lang.Object minCpuPlatform_; + /** + *
+   * Optional. Specifies the minimum CPU platform for the instance group.
+   * See [Cloud Dataproc&rarr;Minimum CPU Platform]
+   * (/dataproc/docs/concepts/compute/dataproc-min-cpu).
+   * 
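+   * A setter sketch (the platform name is an assumption; `builder` is assumed
+   * to be an InstanceGroupConfig.Builder):
+   *   builder.setMinCpuPlatform("Intel Skylake");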
+ * + * string min_cpu_platform = 9; + */ + public java.lang.String getMinCpuPlatform() { + java.lang.Object ref = minCpuPlatform_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + minCpuPlatform_ = s; + return s; + } + } + /** + *
+   * Optional. Specifies the minimum CPU platform for the instance group.
+   * See [Cloud Dataproc&rarr;Minimum CPU Platform]
+   * (/dataproc/docs/concepts/compute/dataproc-min-cpu).
+   * 
+ * + * string min_cpu_platform = 9; + */ + public com.google.protobuf.ByteString + getMinCpuPlatformBytes() { + java.lang.Object ref = minCpuPlatform_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + minCpuPlatform_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (numInstances_ != 0) { + output.writeInt32(1, numInstances_); + } + for (int i = 0; i < instanceNames_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, instanceNames_.getRaw(i)); + } + if (!getImageUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, imageUri_); + } + if (!getMachineTypeUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, machineTypeUri_); + } + if (diskConfig_ != null) { + output.writeMessage(5, getDiskConfig()); + } + if (isPreemptible_ != false) { + output.writeBool(6, isPreemptible_); + } + if (managedGroupConfig_ != null) { + output.writeMessage(7, getManagedGroupConfig()); + } + for (int i = 0; i < accelerators_.size(); i++) { + output.writeMessage(8, accelerators_.get(i)); + } + if (!getMinCpuPlatformBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 9, minCpuPlatform_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (numInstances_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, numInstances_); + } + { + int dataSize = 0; + for (int i = 0; i < instanceNames_.size(); i++) { + dataSize += computeStringSizeNoTag(instanceNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getInstanceNamesList().size(); + } + if (!getImageUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, imageUri_); + } + if (!getMachineTypeUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, machineTypeUri_); + } + if (diskConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, getDiskConfig()); + } + if (isPreemptible_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, isPreemptible_); + } + if (managedGroupConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, getManagedGroupConfig()); + } + for (int i = 0; i < accelerators_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, accelerators_.get(i)); + } + if (!getMinCpuPlatformBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(9, minCpuPlatform_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.InstanceGroupConfig)) { + return super.equals(obj); + } + 
com.google.cloud.dataproc.v1beta2.InstanceGroupConfig other = (com.google.cloud.dataproc.v1beta2.InstanceGroupConfig) obj; + + boolean result = true; + result = result && (getNumInstances() + == other.getNumInstances()); + result = result && getInstanceNamesList() + .equals(other.getInstanceNamesList()); + result = result && getImageUri() + .equals(other.getImageUri()); + result = result && getMachineTypeUri() + .equals(other.getMachineTypeUri()); + result = result && (hasDiskConfig() == other.hasDiskConfig()); + if (hasDiskConfig()) { + result = result && getDiskConfig() + .equals(other.getDiskConfig()); + } + result = result && (getIsPreemptible() + == other.getIsPreemptible()); + result = result && (hasManagedGroupConfig() == other.hasManagedGroupConfig()); + if (hasManagedGroupConfig()) { + result = result && getManagedGroupConfig() + .equals(other.getManagedGroupConfig()); + } + result = result && getAcceleratorsList() + .equals(other.getAcceleratorsList()); + result = result && getMinCpuPlatform() + .equals(other.getMinCpuPlatform()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NUM_INSTANCES_FIELD_NUMBER; + hash = (53 * hash) + getNumInstances(); + if (getInstanceNamesCount() > 0) { + hash = (37 * hash) + INSTANCE_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getInstanceNamesList().hashCode(); + } + hash = (37 * hash) + IMAGE_URI_FIELD_NUMBER; + hash = (53 * hash) + getImageUri().hashCode(); + hash = (37 * hash) + MACHINE_TYPE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMachineTypeUri().hashCode(); + if (hasDiskConfig()) { + hash = (37 * hash) + DISK_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getDiskConfig().hashCode(); + } + hash = (37 * hash) + IS_PREEMPTIBLE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getIsPreemptible()); + if (hasManagedGroupConfig()) { + hash = (37 * hash) + MANAGED_GROUP_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getManagedGroupConfig().hashCode(); + } + if (getAcceleratorsCount() > 0) { + hash = (37 * hash) + ACCELERATORS_FIELD_NUMBER; + hash = (53 * hash) + getAcceleratorsList().hashCode(); + } + hash = (37 * hash) + MIN_CPU_PLATFORM_FIELD_NUMBER; + hash = (53 * hash) + getMinCpuPlatform().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Optional. The config settings for Compute Engine resources in
+   * an instance group, such as a master or worker group.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.InstanceGroupConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.InstanceGroupConfig) + com.google.cloud.dataproc.v1beta2.InstanceGroupConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.class, com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getAcceleratorsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + numInstances_ = 0; + + instanceNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + imageUri_ = ""; + + machineTypeUri_ = ""; + + if (diskConfigBuilder_ == null) { + diskConfig_ = null; + } else { + diskConfig_ = null; + diskConfigBuilder_ = null; + } + isPreemptible_ = false; + + if (managedGroupConfigBuilder_ == null) { + managedGroupConfig_ = null; + } else { + managedGroupConfig_ = null; + managedGroupConfigBuilder_ = null; + } + if (acceleratorsBuilder_ == null) { + accelerators_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + acceleratorsBuilder_.clear(); + } + minCpuPlatform_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_InstanceGroupConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig build() { + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig result = new com.google.cloud.dataproc.v1beta2.InstanceGroupConfig(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.numInstances_ = numInstances_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + instanceNames_ = instanceNames_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.instanceNames_ = instanceNames_; + result.imageUri_ = 
imageUri_; + result.machineTypeUri_ = machineTypeUri_; + if (diskConfigBuilder_ == null) { + result.diskConfig_ = diskConfig_; + } else { + result.diskConfig_ = diskConfigBuilder_.build(); + } + result.isPreemptible_ = isPreemptible_; + if (managedGroupConfigBuilder_ == null) { + result.managedGroupConfig_ = managedGroupConfig_; + } else { + result.managedGroupConfig_ = managedGroupConfigBuilder_.build(); + } + if (acceleratorsBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + accelerators_ = java.util.Collections.unmodifiableList(accelerators_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.accelerators_ = accelerators_; + } else { + result.accelerators_ = acceleratorsBuilder_.build(); + } + result.minCpuPlatform_ = minCpuPlatform_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.InstanceGroupConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.InstanceGroupConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.InstanceGroupConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.InstanceGroupConfig.getDefaultInstance()) return this; + if (other.getNumInstances() != 0) { + setNumInstances(other.getNumInstances()); + } + if (!other.instanceNames_.isEmpty()) { + if (instanceNames_.isEmpty()) { + instanceNames_ = other.instanceNames_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureInstanceNamesIsMutable(); + instanceNames_.addAll(other.instanceNames_); + } + onChanged(); + } + if (!other.getImageUri().isEmpty()) { + imageUri_ = other.imageUri_; + onChanged(); + } + if (!other.getMachineTypeUri().isEmpty()) { + machineTypeUri_ = other.machineTypeUri_; + onChanged(); + } + if (other.hasDiskConfig()) { + mergeDiskConfig(other.getDiskConfig()); + } + if (other.getIsPreemptible() != false) { + setIsPreemptible(other.getIsPreemptible()); + } + if (other.hasManagedGroupConfig()) { + mergeManagedGroupConfig(other.getManagedGroupConfig()); + } + if (acceleratorsBuilder_ == null) { + if (!other.accelerators_.isEmpty()) { + if (accelerators_.isEmpty()) { + accelerators_ = other.accelerators_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureAcceleratorsIsMutable(); + accelerators_.addAll(other.accelerators_); + } + onChanged(); + } + } else { + if (!other.accelerators_.isEmpty()) { + 
if (acceleratorsBuilder_.isEmpty()) { + acceleratorsBuilder_.dispose(); + acceleratorsBuilder_ = null; + accelerators_ = other.accelerators_; + bitField0_ = (bitField0_ & ~0x00000080); + acceleratorsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getAcceleratorsFieldBuilder() : null; + } else { + acceleratorsBuilder_.addAllMessages(other.accelerators_); + } + } + } + if (!other.getMinCpuPlatform().isEmpty()) { + minCpuPlatform_ = other.minCpuPlatform_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.InstanceGroupConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.InstanceGroupConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private int numInstances_ ; + /** + *
+     * Optional. The number of VM instances in the instance group.
+     * For master instance groups, must be set to 1.
+     * 
+ * + * int32 num_instances = 1; + */ + public int getNumInstances() { + return numInstances_; + } + /** + *
+     * Optional. The number of VM instances in the instance group.
+     * For master instance groups, must be set to 1.
+     * 
+ * + * int32 num_instances = 1; + */ + public Builder setNumInstances(int value) { + + numInstances_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The number of VM instances in the instance group.
+     * For master instance groups, must be set to 1.
+     * 
+ * + * int32 num_instances = 1; + */ + public Builder clearNumInstances() { + + numInstances_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList instanceNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureInstanceNamesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + instanceNames_ = new com.google.protobuf.LazyStringArrayList(instanceNames_); + bitField0_ |= 0x00000002; + } + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public com.google.protobuf.ProtocolStringList + getInstanceNamesList() { + return instanceNames_.getUnmodifiableView(); + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public int getInstanceNamesCount() { + return instanceNames_.size(); + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public java.lang.String getInstanceNames(int index) { + return instanceNames_.get(index); + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public com.google.protobuf.ByteString + getInstanceNamesBytes(int index) { + return instanceNames_.getByteString(index); + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public Builder setInstanceNames( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstanceNamesIsMutable(); + instanceNames_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public Builder addInstanceNames( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstanceNamesIsMutable(); + instanceNames_.add(value); + onChanged(); + return this; + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public Builder addAllInstanceNames( + java.lang.Iterable values) { + ensureInstanceNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, instanceNames_); + onChanged(); + return this; + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public Builder clearInstanceNames() { + instanceNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + *
+     * Output only. The list of instance names. Cloud Dataproc derives the names
+     * from `cluster_name`, `num_instances`, and the instance group.
+     * 
+ * + * repeated string instance_names = 2; + */ + public Builder addInstanceNamesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureInstanceNamesIsMutable(); + instanceNames_.add(value); + onChanged(); + return this; + } + + private java.lang.Object imageUri_ = ""; + /** + *
+     * Output only. The Compute Engine image resource used for cluster
+     * instances. Inferred from `SoftwareConfig.image_version`.
+     * 
+ * + * string image_uri = 3; + */ + public java.lang.String getImageUri() { + java.lang.Object ref = imageUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + imageUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The Compute Engine image resource used for cluster
+     * instances. Inferred from `SoftwareConfig.image_version`.
+     * 
+ * + * string image_uri = 3; + */ + public com.google.protobuf.ByteString + getImageUriBytes() { + java.lang.Object ref = imageUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + imageUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The Compute Engine image resource used for cluster
+     * instances. Inferred from `SoftwareConfig.image_version`.
+     * 
+ * + * string image_uri = 3; + */ + public Builder setImageUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + imageUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The Compute Engine image resource used for cluster
+     * instances. Inferred from `SoftwareConfig.image_version`.
+     * 
+ * + * string image_uri = 3; + */ + public Builder clearImageUri() { + + imageUri_ = getDefaultInstance().getImageUri(); + onChanged(); + return this; + } + /** + *
+     * Output only. The Compute Engine image resource used for cluster
+     * instances. Inferred from `SoftwareConfig.image_version`.
+     * 
+ * + * string image_uri = 3; + */ + public Builder setImageUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + imageUri_ = value; + onChanged(); + return this; + } + + private java.lang.Object machineTypeUri_ = ""; + /** + *
+     * Optional. The Compute Engine machine type used for cluster instances.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `n1-standard-2`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the machine type
+     * resource, for example, `n1-standard-2`.
+     * 
+ * + * string machine_type_uri = 4; + */ + public java.lang.String getMachineTypeUri() { + java.lang.Object ref = machineTypeUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + machineTypeUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The Compute Engine machine type used for cluster instances.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `n1-standard-2`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the machine type
+     * resource, for example, `n1-standard-2`.
+     * 
+ * + * string machine_type_uri = 4; + */ + public com.google.protobuf.ByteString + getMachineTypeUriBytes() { + java.lang.Object ref = machineTypeUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + machineTypeUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The Compute Engine machine type used for cluster instances.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `n1-standard-2`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the machine type
+     * resource, for example, `n1-standard-2`.
+     * 
+ * + * string machine_type_uri = 4; + */ + public Builder setMachineTypeUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + machineTypeUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The Compute Engine machine type used for cluster instances.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `n1-standard-2`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the machine type
+     * resource, for example, `n1-standard-2`.
+     * 
+ * + * string machine_type_uri = 4; + */ + public Builder clearMachineTypeUri() { + + machineTypeUri_ = getDefaultInstance().getMachineTypeUri(); + onChanged(); + return this; + } + /** + *
+     * Optional. The Compute Engine machine type used for cluster instances.
+     * A full URL, partial URI, or short name is valid. Examples:
+     * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+     * * `n1-standard-2`
+     * **Auto Zone Exception**: If you are using the Cloud Dataproc
+     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * feature, you must use the short name of the machine type
+     * resource, for example, `n1-standard-2`.
+     * 
+ * + * string machine_type_uri = 4; + */ + public Builder setMachineTypeUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + machineTypeUri_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.DiskConfig diskConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.DiskConfig, com.google.cloud.dataproc.v1beta2.DiskConfig.Builder, com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder> diskConfigBuilder_; + /** + *
+     * Optional. Disk option config settings.
+     * 
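+     * A minimal sketch; the DiskConfig setter name is an assumption based on
+     * its proto field:
+     * <pre>{@code
+     * builder.setDiskConfig(
+     *     DiskConfig.newBuilder()
+     *         .setBootDiskSizeGb(100)  // assumed setter; size in GB
+     *         .build());
+     * }</pre>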
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public boolean hasDiskConfig() { + return diskConfigBuilder_ != null || diskConfig_ != null; + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public com.google.cloud.dataproc.v1beta2.DiskConfig getDiskConfig() { + if (diskConfigBuilder_ == null) { + return diskConfig_ == null ? com.google.cloud.dataproc.v1beta2.DiskConfig.getDefaultInstance() : diskConfig_; + } else { + return diskConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public Builder setDiskConfig(com.google.cloud.dataproc.v1beta2.DiskConfig value) { + if (diskConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + diskConfig_ = value; + onChanged(); + } else { + diskConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public Builder setDiskConfig( + com.google.cloud.dataproc.v1beta2.DiskConfig.Builder builderForValue) { + if (diskConfigBuilder_ == null) { + diskConfig_ = builderForValue.build(); + onChanged(); + } else { + diskConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public Builder mergeDiskConfig(com.google.cloud.dataproc.v1beta2.DiskConfig value) { + if (diskConfigBuilder_ == null) { + if (diskConfig_ != null) { + diskConfig_ = + com.google.cloud.dataproc.v1beta2.DiskConfig.newBuilder(diskConfig_).mergeFrom(value).buildPartial(); + } else { + diskConfig_ = value; + } + onChanged(); + } else { + diskConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public Builder clearDiskConfig() { + if (diskConfigBuilder_ == null) { + diskConfig_ = null; + onChanged(); + } else { + diskConfig_ = null; + diskConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public com.google.cloud.dataproc.v1beta2.DiskConfig.Builder getDiskConfigBuilder() { + + onChanged(); + return getDiskConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + public com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder getDiskConfigOrBuilder() { + if (diskConfigBuilder_ != null) { + return diskConfigBuilder_.getMessageOrBuilder(); + } else { + return diskConfig_ == null ? + com.google.cloud.dataproc.v1beta2.DiskConfig.getDefaultInstance() : diskConfig_; + } + } + /** + *
+     * Optional. Disk option config settings.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.DiskConfig, com.google.cloud.dataproc.v1beta2.DiskConfig.Builder, com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder> + getDiskConfigFieldBuilder() { + if (diskConfigBuilder_ == null) { + diskConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.DiskConfig, com.google.cloud.dataproc.v1beta2.DiskConfig.Builder, com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder>( + getDiskConfig(), + getParentForChildren(), + isClean()); + diskConfig_ = null; + } + return diskConfigBuilder_; + } + + private boolean isPreemptible_ ; + /** + *
+     * Optional. Specifies that this instance group contains preemptible instances.
+     * 
+ * + * bool is_preemptible = 6; + */ + public boolean getIsPreemptible() { + return isPreemptible_; + } + /** + *
+     * Optional. Specifies that this instance group contains preemptible instances.
+     * 
+ * + * bool is_preemptible = 6; + */ + public Builder setIsPreemptible(boolean value) { + + isPreemptible_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Specifies that this instance group contains preemptible instances.
+     * 
+ * + * bool is_preemptible = 6; + */ + public Builder clearIsPreemptible() { + + isPreemptible_ = false; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.ManagedGroupConfig managedGroupConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig, com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.ManagedGroupConfigOrBuilder> managedGroupConfigBuilder_; + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public boolean hasManagedGroupConfig() { + return managedGroupConfigBuilder_ != null || managedGroupConfig_ != null; + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfig getManagedGroupConfig() { + if (managedGroupConfigBuilder_ == null) { + return managedGroupConfig_ == null ? com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.getDefaultInstance() : managedGroupConfig_; + } else { + return managedGroupConfigBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public Builder setManagedGroupConfig(com.google.cloud.dataproc.v1beta2.ManagedGroupConfig value) { + if (managedGroupConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + managedGroupConfig_ = value; + onChanged(); + } else { + managedGroupConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public Builder setManagedGroupConfig( + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder builderForValue) { + if (managedGroupConfigBuilder_ == null) { + managedGroupConfig_ = builderForValue.build(); + onChanged(); + } else { + managedGroupConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public Builder mergeManagedGroupConfig(com.google.cloud.dataproc.v1beta2.ManagedGroupConfig value) { + if (managedGroupConfigBuilder_ == null) { + if (managedGroupConfig_ != null) { + managedGroupConfig_ = + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.newBuilder(managedGroupConfig_).mergeFrom(value).buildPartial(); + } else { + managedGroupConfig_ = value; + } + onChanged(); + } else { + managedGroupConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public Builder clearManagedGroupConfig() { + if (managedGroupConfigBuilder_ == null) { + managedGroupConfig_ = null; + onChanged(); + } else { + managedGroupConfig_ = null; + managedGroupConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder getManagedGroupConfigBuilder() { + + onChanged(); + return getManagedGroupConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfigOrBuilder getManagedGroupConfigOrBuilder() { + if (managedGroupConfigBuilder_ != null) { + return managedGroupConfigBuilder_.getMessageOrBuilder(); + } else { + return managedGroupConfig_ == null ? + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.getDefaultInstance() : managedGroupConfig_; + } + } + /** + *
+     * Output only. The config for the Compute Engine Instance Group
+     * Manager that manages this group.
+     * This is only used for preemptible instance groups.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig, com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.ManagedGroupConfigOrBuilder> + getManagedGroupConfigFieldBuilder() { + if (managedGroupConfigBuilder_ == null) { + managedGroupConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig, com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder, com.google.cloud.dataproc.v1beta2.ManagedGroupConfigOrBuilder>( + getManagedGroupConfig(), + getParentForChildren(), + isClean()); + managedGroupConfig_ = null; + } + return managedGroupConfigBuilder_; + } + + private java.util.List accelerators_ = + java.util.Collections.emptyList(); + private void ensureAcceleratorsIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + accelerators_ = new java.util.ArrayList(accelerators_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.AcceleratorConfig, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder, com.google.cloud.dataproc.v1beta2.AcceleratorConfigOrBuilder> acceleratorsBuilder_; + + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
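+     * A sketch of attaching one accelerator per instance; the
+     * AcceleratorConfig setter names are assumptions based on its proto
+     * fields:
+     * <pre>{@code
+     * builder.addAccelerators(
+     *     AcceleratorConfig.newBuilder()
+     *         .setAcceleratorTypeUri("nvidia-tesla-k80")  // assumed setter
+     *         .setAcceleratorCount(1)                     // assumed setter
+     *         .build());
+     * }</pre>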
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public java.util.List getAcceleratorsList() { + if (acceleratorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(accelerators_); + } else { + return acceleratorsBuilder_.getMessageList(); + } + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public int getAcceleratorsCount() { + if (acceleratorsBuilder_ == null) { + return accelerators_.size(); + } else { + return acceleratorsBuilder_.getCount(); + } + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig getAccelerators(int index) { + if (acceleratorsBuilder_ == null) { + return accelerators_.get(index); + } else { + return acceleratorsBuilder_.getMessage(index); + } + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder setAccelerators( + int index, com.google.cloud.dataproc.v1beta2.AcceleratorConfig value) { + if (acceleratorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAcceleratorsIsMutable(); + accelerators_.set(index, value); + onChanged(); + } else { + acceleratorsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder setAccelerators( + int index, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder builderForValue) { + if (acceleratorsBuilder_ == null) { + ensureAcceleratorsIsMutable(); + accelerators_.set(index, builderForValue.build()); + onChanged(); + } else { + acceleratorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder addAccelerators(com.google.cloud.dataproc.v1beta2.AcceleratorConfig value) { + if (acceleratorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAcceleratorsIsMutable(); + accelerators_.add(value); + onChanged(); + } else { + acceleratorsBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder addAccelerators( + int index, com.google.cloud.dataproc.v1beta2.AcceleratorConfig value) { + if (acceleratorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAcceleratorsIsMutable(); + accelerators_.add(index, value); + onChanged(); + } else { + acceleratorsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder addAccelerators( + com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder builderForValue) { + if (acceleratorsBuilder_ == null) { + ensureAcceleratorsIsMutable(); + accelerators_.add(builderForValue.build()); + onChanged(); + } else { + acceleratorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder addAccelerators( + int index, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder builderForValue) { + if (acceleratorsBuilder_ == null) { + ensureAcceleratorsIsMutable(); + accelerators_.add(index, builderForValue.build()); + onChanged(); + } else { + acceleratorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder addAllAccelerators( + java.lang.Iterable values) { + if (acceleratorsBuilder_ == null) { + ensureAcceleratorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, accelerators_); + onChanged(); + } else { + acceleratorsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder clearAccelerators() { + if (acceleratorsBuilder_ == null) { + accelerators_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + acceleratorsBuilder_.clear(); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public Builder removeAccelerators(int index) { + if (acceleratorsBuilder_ == null) { + ensureAcceleratorsIsMutable(); + accelerators_.remove(index); + onChanged(); + } else { + acceleratorsBuilder_.remove(index); + } + return this; + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder getAcceleratorsBuilder( + int index) { + return getAcceleratorsFieldBuilder().getBuilder(index); + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public com.google.cloud.dataproc.v1beta2.AcceleratorConfigOrBuilder getAcceleratorsOrBuilder( + int index) { + if (acceleratorsBuilder_ == null) { + return accelerators_.get(index); } else { + return acceleratorsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public java.util.List + getAcceleratorsOrBuilderList() { + if (acceleratorsBuilder_ != null) { + return acceleratorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(accelerators_); + } + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder addAcceleratorsBuilder() { + return getAcceleratorsFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.AcceleratorConfig.getDefaultInstance()); + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder addAcceleratorsBuilder( + int index) { + return getAcceleratorsFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.getDefaultInstance()); + } + /** + *
+     * Optional. The Compute Engine accelerator configuration for these
+     * instances.
+     * **Beta Feature**: This feature is still under development. It may be
+     * changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + public java.util.List + getAcceleratorsBuilderList() { + return getAcceleratorsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.AcceleratorConfig, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder, com.google.cloud.dataproc.v1beta2.AcceleratorConfigOrBuilder> + getAcceleratorsFieldBuilder() { + if (acceleratorsBuilder_ == null) { + acceleratorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.AcceleratorConfig, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder, com.google.cloud.dataproc.v1beta2.AcceleratorConfigOrBuilder>( + accelerators_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + accelerators_ = null; + } + return acceleratorsBuilder_; + } + + private java.lang.Object minCpuPlatform_ = ""; + /** + *
+     * Optional. Specifies the minimum CPU platform for the Instance Group.
+     * See [Cloud Dataproc&rarr;Minimum CPU Platform](/dataproc/docs/concepts/compute/dataproc-min-cpu).
+     * 
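+     * For example, assuming "Intel Skylake" is an accepted platform name:
+     * <pre>{@code
+     * builder.setMinCpuPlatform("Intel Skylake");  // example platform
+     * }</pre>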
+ * + * string min_cpu_platform = 9; + */ + public java.lang.String getMinCpuPlatform() { + java.lang.Object ref = minCpuPlatform_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + minCpuPlatform_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. Specifies the minimum CPU platform for the Instance Group.
+     * See [Cloud Dataproc&rarr;Minimum CPU Platform](/dataproc/docs/concepts/compute/dataproc-min-cpu).
+     * 
+ * + * string min_cpu_platform = 9; + */ + public com.google.protobuf.ByteString + getMinCpuPlatformBytes() { + java.lang.Object ref = minCpuPlatform_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + minCpuPlatform_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. Specifies the minimum CPU platform for the Instance Group.
+     * See [Cloud Dataproc&rarr;Minimum CPU Platform](/dataproc/docs/concepts/compute/dataproc-min-cpu).
+     * 
+ * + * string min_cpu_platform = 9; + */ + public Builder setMinCpuPlatform( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + minCpuPlatform_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Specifies the minimum CPU platform for the Instance Group.
+     * See [Cloud Dataproc&rarr;Minimum CPU Platform](/dataproc/docs/concepts/compute/dataproc-min-cpu).
+     * 
+ * + * string min_cpu_platform = 9; + */ + public Builder clearMinCpuPlatform() { + + minCpuPlatform_ = getDefaultInstance().getMinCpuPlatform(); + onChanged(); + return this; + } + /** + *
+     * Optional. Specifies the minimum CPU platform for the Instance Group.
+     * See [Cloud Dataproc&rarr;Minimum CPU Platform](/dataproc/docs/concepts/compute/dataproc-min-cpu).
+     * 
+ * + * string min_cpu_platform = 9; + */ + public Builder setMinCpuPlatformBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + minCpuPlatform_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.InstanceGroupConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstanceGroupConfig) + private static final com.google.cloud.dataproc.v1beta2.InstanceGroupConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.InstanceGroupConfig(); + } + + public static com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InstanceGroupConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new InstanceGroupConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstanceGroupConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java new file mode 100644 index 000000000000..f6af4d9f3340 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java @@ -0,0 +1,258 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface InstanceGroupConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.InstanceGroupConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. The number of VM instances in the instance group.
+   * For master instance groups, must be set to 1.
+   * 
+ * + * int32 num_instances = 1; + */ + int getNumInstances(); + + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + java.util.List + getInstanceNamesList(); + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + int getInstanceNamesCount(); + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + java.lang.String getInstanceNames(int index); + /** + *
+   * Output only. The list of instance names. Cloud Dataproc derives the names
+   * from `cluster_name`, `num_instances`, and the instance group.
+   * 
+ * + * repeated string instance_names = 2; + */ + com.google.protobuf.ByteString + getInstanceNamesBytes(int index); + + /** + *
+   * Output only. The Compute Engine image resource used for cluster
+   * instances. Inferred from `SoftwareConfig.image_version`.
+   * 
+ * + * string image_uri = 3; + */ + java.lang.String getImageUri(); + /** + *
+   * Output only. The Compute Engine image resource used for cluster
+   * instances. Inferred from `SoftwareConfig.image_version`.
+   * 
+ * + * string image_uri = 3; + */ + com.google.protobuf.ByteString + getImageUriBytes(); + + /** + *
+   * Optional. The Compute Engine machine type used for cluster instances.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `n1-standard-2`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the machine type
+   * resource, for example, `n1-standard-2`.
+   * 
+ * + * string machine_type_uri = 4; + */ + java.lang.String getMachineTypeUri(); + /** + *
+   * Optional. The Compute Engine machine type used for cluster instances.
+   * A full URL, partial URI, or short name is valid. Examples:
+   * * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
+   * * `n1-standard-2`
+   * **Auto Zone Exception**: If you are using the Cloud Dataproc
+   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * feature, you must use the short name of the machine type
+   * resource, for example, `n1-standard-2`.
+   * 
+ * + * string machine_type_uri = 4; + */ + com.google.protobuf.ByteString + getMachineTypeUriBytes(); + + /** + *
+   * Optional. Disk option config settings.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + boolean hasDiskConfig(); + /** + *
+   * Optional. Disk option config settings.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + com.google.cloud.dataproc.v1beta2.DiskConfig getDiskConfig(); + /** + *
+   * Optional. Disk option config settings.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.DiskConfig disk_config = 5; + */ + com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder getDiskConfigOrBuilder(); + + /** + *
+   * Optional. Specifies that this instance group contains preemptible instances.
+   * 
+ * + * bool is_preemptible = 6; + */ + boolean getIsPreemptible(); + + /** + *
+   * Output only. The config for the Compute Engine Instance Group
+   * Manager that manages this group.
+   * This is only used for preemptible instance groups.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + boolean hasManagedGroupConfig(); + /** + *
+   * Output only. The config for the Compute Engine Instance Group
+   * Manager that manages this group.
+   * This is only used for preemptible instance groups.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig getManagedGroupConfig(); + /** + *
+   * Output only. The config for the Compute Engine Instance Group
+   * Manager that manages this group.
+   * This is only used for preemptible instance groups.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedGroupConfig managed_group_config = 7; + */ + com.google.cloud.dataproc.v1beta2.ManagedGroupConfigOrBuilder getManagedGroupConfigOrBuilder(); + + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + java.util.List + getAcceleratorsList(); + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + com.google.cloud.dataproc.v1beta2.AcceleratorConfig getAccelerators(int index); + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + int getAcceleratorsCount(); + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + java.util.List + getAcceleratorsOrBuilderList(); + /** + *
+   * Optional. The Compute Engine accelerator configuration for these
+   * instances.
+   * **Beta Feature**: This feature is still under development. It may be
+   * changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.AcceleratorConfig accelerators = 8; + */ + com.google.cloud.dataproc.v1beta2.AcceleratorConfigOrBuilder getAcceleratorsOrBuilder( + int index); + + /** + *
+   * Optional. Specifies the minimum CPU platform for the Instance Group.
+   * See [Cloud Dataproc&rarr;Minimum CPU Platform](/dataproc/docs/concepts/compute/dataproc-min-cpu).
+   * 
+ * + * string min_cpu_platform = 9; + */ + java.lang.String getMinCpuPlatform(); + /** + *
+   * Optional. Specifies the minimum CPU platform for the Instance Group.
+   * See [Cloud Dataproc&rarr;Minimum CPU Platform](/dataproc/docs/concepts/compute/dataproc-min-cpu).
+   * 
+ * + * string min_cpu_platform = 9; + */ + com.google.protobuf.ByteString + getMinCpuPlatformBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequest.java new file mode 100644 index 000000000000..a47526e26c88 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequest.java @@ -0,0 +1,1017 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to instantiate an inline workflow template.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest} + */ +public final class InstantiateInlineWorkflowTemplateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) + InstantiateInlineWorkflowTemplateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use InstantiateInlineWorkflowTemplateRequest.newBuilder() to construct. + private InstantiateInlineWorkflowTemplateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private InstantiateInlineWorkflowTemplateRequest() { + parent_ = ""; + instanceId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private InstantiateInlineWorkflowTemplateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder subBuilder = null; + if (template_ != null) { + subBuilder = template_.toBuilder(); + } + template_ = input.readMessage(com.google.cloud.dataproc.v1beta2.WorkflowTemplate.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(template_); + template_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + instanceId_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + *
+   * Required. The "resource name" of the workflow template region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + *
+   * Required. The "resource name" of the workflow template region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + public com.google.protobuf.ByteString + getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TEMPLATE_FIELD_NUMBER = 2; + private com.google.cloud.dataproc.v1beta2.WorkflowTemplate template_; + /** + *
+   * Required. The workflow template to instantiate.
+   * 
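+   * Unlike InstantiateWorkflowTemplateRequest, the template is embedded
+   * in the request body rather than referenced by resource name.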
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public boolean hasTemplate() { + return template_ != null; + } + /** + *
+   * Required. The workflow template to instantiate.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate() { + return template_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } + /** + *
+   * Required. The workflow template to instantiate.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder() { + return getTemplate(); + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 3; + private volatile java.lang.Object instanceId_; + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances being started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
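+   * For example, a value generated with
+   * java.util.UUID.randomUUID().toString() satisfies these constraints.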
+ * + * string instance_id = 3; + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances being started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string instance_id = 3; + */ + public com.google.protobuf.ByteString + getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (template_ != null) { + output.writeMessage(2, getTemplate()); + } + if (!getInstanceIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, instanceId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (template_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getTemplate()); + } + if (!getInstanceIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, instanceId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest other = (com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) obj; + + boolean result = true; + result = result && getParent() + .equals(other.getParent()); + result = result && (hasTemplate() == other.hasTemplate()); + if (hasTemplate()) { + result = result && getTemplate() + .equals(other.getTemplate()); + } + result = result && getInstanceId() + .equals(other.getInstanceId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasTemplate()) { + hash = (37 * hash) + TEMPLATE_FIELD_NUMBER; + hash = (53 * hash) + getTemplate().hashCode(); + } + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to instantiate an inline workflow template.
+   * 
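+   * A minimal usage sketch (all values below are illustrative placeholders,
+   * not part of this API's contract):
+   * InstantiateInlineWorkflowTemplateRequest request =
+   *     InstantiateInlineWorkflowTemplateRequest.newBuilder()
+   *         .setParent("projects/my-project/regions/us-central1")
+   *         .setTemplate(WorkflowTemplate.getDefaultInstance())
+   *         .setInstanceId(java.util.UUID.randomUUID().toString())
+   *         .build();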
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + if (templateBuilder_ == null) { + template_ = null; + } else { + template_ = null; + templateBuilder_ = null; + } + instanceId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest build() { + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest result = new com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest(this); + result.parent_ = parent_; + if (templateBuilder_ == null) { + result.template_ = template_; + } else { + result.template_ = templateBuilder_.build(); + } + result.instanceId_ = instanceId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + 
public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.hasTemplate()) { + mergeTemplate(other.getTemplate()); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + *
+     * Required. The "resource name" of the workflow template region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public com.google.protobuf.ByteString + getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder setParent( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder setParentBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.WorkflowTemplate template_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> templateBuilder_; + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public boolean hasTemplate() { + return templateBuilder_ != null || template_ != null; + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate() { + if (templateBuilder_ == null) { + return template_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } else { + return templateBuilder_.getMessage(); + } + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder setTemplate(com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + template_ = value; + onChanged(); + } else { + templateBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder setTemplate( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder builderForValue) { + if (templateBuilder_ == null) { + template_ = builderForValue.build(); + onChanged(); + } else { + templateBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder mergeTemplate(com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templateBuilder_ == null) { + if (template_ != null) { + template_ = + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.newBuilder(template_).mergeFrom(value).buildPartial(); + } else { + template_ = value; + } + onChanged(); + } else { + templateBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public Builder clearTemplate() { + if (templateBuilder_ == null) { + template_ = null; + onChanged(); + } else { + template_ = null; + templateBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder getTemplateBuilder() { + + onChanged(); + return getTemplateFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder() { + if (templateBuilder_ != null) { + return templateBuilder_.getMessageOrBuilder(); + } else { + return template_ == null ? + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } + } + /** + *
+     * Required. The workflow template to instantiate.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> + getTemplateFieldBuilder() { + if (templateBuilder_ == null) { + templateBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder>( + getTemplate(), + getParentForChildren(), + isClean()); + template_ = null; + } + return templateBuilder_; + } + + private java.lang.Object instanceId_ = ""; + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public com.google.protobuf.ByteString + getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public Builder setInstanceId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + instanceId_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public Builder clearInstanceId() { + + instanceId_ = getDefaultInstance().getInstanceId(); + onChanged(); + return this; + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public Builder setInstanceIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + instanceId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) + private static final com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InstantiateInlineWorkflowTemplateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new InstantiateInlineWorkflowTemplateRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequestOrBuilder.java new file mode 100644 index 000000000000..64c6cd914978 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequestOrBuilder.java @@ -0,0 +1,86 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface InstantiateInlineWorkflowTemplateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The "resource name" of the workflow template region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + java.lang.String getParent(); + /** + *
+   * Required. The "resource name" of the workflow template region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + com.google.protobuf.ByteString + getParentBytes(); + + /** + *
+   * Required. The workflow template to instantiate.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + boolean hasTemplate(); + /** + *
+   * Required. The workflow template to instantiate.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate(); + /** + *
+   * Required. The workflow template to instantiate.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 2; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder(); + + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances being started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string instance_id = 3; + */ + java.lang.String getInstanceId(); + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances being started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string instance_id = 3; + */ + com.google.protobuf.ByteString + getInstanceIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequest.java new file mode 100644 index 000000000000..3bb01dcd800b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequest.java @@ -0,0 +1,878 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to instantiate a workflow template.
+ * 
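+ * A minimal usage sketch (the name, version, and instance id below are
+ * illustrative placeholders; this request references a template already
+ * stored in the service, unlike the inline variant):
+ * InstantiateWorkflowTemplateRequest request =
+ *     InstantiateWorkflowTemplateRequest.newBuilder()
+ *         .setName("projects/my-project/regions/us-central1/workflowTemplates/my-template")
+ *         .setVersion(1)
+ *         .setInstanceId(java.util.UUID.randomUUID().toString())
+ *         .build();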
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest} + */ +public final class InstantiateWorkflowTemplateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) + InstantiateWorkflowTemplateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use InstantiateWorkflowTemplateRequest.newBuilder() to construct. + private InstantiateWorkflowTemplateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private InstantiateWorkflowTemplateRequest() { + name_ = ""; + version_ = 0; + instanceId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private InstantiateWorkflowTemplateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: { + + version_ = input.readInt32(); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + instanceId_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
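+   * For example (with hypothetical identifiers):
+   * `projects/my-project/regions/us-central1/workflowTemplates/my-template`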
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSION_FIELD_NUMBER = 2; + private int version_; + /** + *
+   * Optional. The version of the workflow template to instantiate. If specified,
+   * the workflow will be instantiated only if the current version of
+   * the workflow template has the supplied version.
+   * This option cannot be used to instantiate a previous version of
+   * the workflow template.
+   * 
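+   * For example, setVersion(3) instantiates the template only if its
+   * current version is 3 (a hypothetical value).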
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 3; + private volatile java.lang.Object instanceId_; + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances being started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string instance_id = 3; + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances being started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string instance_id = 3; + */ + public com.google.protobuf.ByteString + getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (version_ != 0) { + output.writeInt32(2, version_); + } + if (!getInstanceIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, instanceId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (version_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, version_); + } + if (!getInstanceIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, instanceId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest other = (com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) obj; + + boolean result = true; + result = result && getName() + .equals(other.getName()); + result = result && (getVersion() + == other.getVersion()); + result = result && getInstanceId() + .equals(other.getInstanceId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to instantiate a workflow template.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + version_ = 0; + + instanceId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest build() { + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest result = new com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest(this); + result.name_ = name_; + result.version_ = version_; + result.instanceId_ = instanceId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + 
@java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getVersion() != 0) { + setVersion(other.getVersion()); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the workflow template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int version_ ; + /** + *
+     * Optional. The version of the workflow template to instantiate. If specified,
+     * the workflow will be instantiated only if the current version of
+     * the workflow template has the supplied version.
+     * This option cannot be used to instantiate a previous version of
+     * the workflow template.
+     * 
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + /** + *
+     * Optional. The version of the workflow template to instantiate. If specified,
+     * the workflow will be instantiated only if the current version of
+     * the workflow template has the supplied version.
+     * This option cannot be used to instantiate a previous version of
+     * the workflow template.
+     * 
+ * + * int32 version = 2; + */ + public Builder setVersion(int value) { + + version_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The version of the workflow template to instantiate. If specified,
+     * the workflow will be instantiated only if the current version of
+     * the workflow template has the supplied version.
+     * This option cannot be used to instantiate a previous version of
+     * the workflow template.
+     * 
+ * + * int32 version = 2; + */ + public Builder clearVersion() { + + version_ = 0; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public com.google.protobuf.ByteString + getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances being started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public Builder setInstanceId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + instanceId_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public Builder clearInstanceId() { + + instanceId_ = getDefaultInstance().getInstanceId(); + onChanged(); + return this; + } + /** + *
+     * Optional. A tag that prevents multiple concurrent workflow
+     * instances with the same tag from running. This mitigates the risk of
+     * concurrent instances started due to retries.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string instance_id = 3; + */ + public Builder setInstanceIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + instanceId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) + private static final com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InstantiateWorkflowTemplateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new InstantiateWorkflowTemplateRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequestOrBuilder.java new file mode 100644 index 000000000000..4ecd13892645 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequestOrBuilder.java @@ -0,0 +1,74 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface InstantiateWorkflowTemplateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+   * 
+ * + * string name = 1; + */ + java.lang.String getName(); + /** + *
+   * Required. The "resource name" of the workflow template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`.
+   * 
+ * + * string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
+   * Optional. The version of the workflow template to instantiate. If specified,
+   * the workflow will be instantiated only if the current version of
+   * the workflow template has the supplied version.
+   * This option cannot be used to instantiate a previous version of
+   * the workflow template.
+   * 
+ * + * int32 version = 2; + */ + int getVersion(); + + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string instance_id = 3; + */ + java.lang.String getInstanceId(); + /** + *
+   * Optional. A tag that prevents multiple concurrent workflow
+   * instances with the same tag from running. This mitigates the risk of
+   * concurrent instances started due to retries.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The tag must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string instance_id = 3; + */ + com.google.protobuf.ByteString + getInstanceIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java new file mode 100644 index 000000000000..78d53309a476 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java @@ -0,0 +1,4614 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A Cloud Dataproc job resource.
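+ * At most one of the job-type fields (hadoop_job, spark_job, pyspark_job,
+ * hive_job, pig_job, spark_sql_job) is set; together they form the type_job
+ * oneof. A minimal illustrative sketch (cluster and jar names are hypothetical):
+ * <pre>
+ * Job job = Job.newBuilder()
+ *     .setPlacement(JobPlacement.newBuilder().setClusterName("my-cluster"))
+ *     .setHadoopJob(HadoopJob.newBuilder()
+ *         .setMainJarFileUri("gs://my-bucket/wordcount.jar"))
+ *     .build();
+ * // job.getTypeJobCase() == Job.TypeJobCase.HADOOP_JOB
+ * </pre>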
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.Job} + */ +public final class Job extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.Job) + JobOrBuilder { +private static final long serialVersionUID = 0L; + // Use Job.newBuilder() to construct. + private Job(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Job() { + statusHistory_ = java.util.Collections.emptyList(); + yarnApplications_ = java.util.Collections.emptyList(); + driverOutputResourceUri_ = ""; + driverControlFilesUri_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Job( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + com.google.cloud.dataproc.v1beta2.JobReference.Builder subBuilder = null; + if (reference_ != null) { + subBuilder = reference_.toBuilder(); + } + reference_ = input.readMessage(com.google.cloud.dataproc.v1beta2.JobReference.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(reference_); + reference_ = subBuilder.buildPartial(); + } + + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.JobPlacement.Builder subBuilder = null; + if (placement_ != null) { + subBuilder = placement_.toBuilder(); + } + placement_ = input.readMessage(com.google.cloud.dataproc.v1beta2.JobPlacement.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(placement_); + placement_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + com.google.cloud.dataproc.v1beta2.HadoopJob.Builder subBuilder = null; + if (typeJobCase_ == 3) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.HadoopJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_); + typeJob_ = subBuilder.buildPartial(); + } + typeJobCase_ = 3; + break; + } + case 34: { + com.google.cloud.dataproc.v1beta2.SparkJob.Builder subBuilder = null; + if (typeJobCase_ == 4) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.SparkJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_); + typeJob_ = subBuilder.buildPartial(); + } + typeJobCase_ = 4; + break; + } + case 42: { + com.google.cloud.dataproc.v1beta2.PySparkJob.Builder subBuilder = null; + if (typeJobCase_ == 5) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.PySparkJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_); + typeJob_ = 
subBuilder.buildPartial(); + } + typeJobCase_ = 5; + break; + } + case 50: { + com.google.cloud.dataproc.v1beta2.HiveJob.Builder subBuilder = null; + if (typeJobCase_ == 6) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.HiveJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_); + typeJob_ = subBuilder.buildPartial(); + } + typeJobCase_ = 6; + break; + } + case 58: { + com.google.cloud.dataproc.v1beta2.PigJob.Builder subBuilder = null; + if (typeJobCase_ == 7) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.PigJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.PigJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.PigJob) typeJob_); + typeJob_ = subBuilder.buildPartial(); + } + typeJobCase_ = 7; + break; + } + case 66: { + com.google.cloud.dataproc.v1beta2.JobStatus.Builder subBuilder = null; + if (status_ != null) { + subBuilder = status_.toBuilder(); + } + status_ = input.readMessage(com.google.cloud.dataproc.v1beta2.JobStatus.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(status_); + status_ = subBuilder.buildPartial(); + } + + break; + } + case 74: { + if (!((mutable_bitField0_ & 0x00000400) == 0x00000400)) { + yarnApplications_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000400; + } + yarnApplications_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.YarnApplication.parser(), extensionRegistry)); + break; + } + case 98: { + com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder subBuilder = null; + if (typeJobCase_ == 12) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.SparkSqlJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_); + typeJob_ = subBuilder.buildPartial(); + } + typeJobCase_ = 12; + break; + } + case 106: { + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + statusHistory_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000200; + } + statusHistory_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.JobStatus.parser(), extensionRegistry)); + break; + } + case 122: { + java.lang.String s = input.readStringRequireUtf8(); + + driverControlFilesUri_ = s; + break; + } + case 138: { + java.lang.String s = input.readStringRequireUtf8(); + + driverOutputResourceUri_ = s; + break; + } + case 146: { + if (!((mutable_bitField0_ & 0x00002000) == 0x00002000)) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00002000; + } + com.google.protobuf.MapEntry + labels__ = input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + labels_.getMutableMap().put( + labels__.getKey(), labels__.getValue()); + break; + } + case 162: { + com.google.cloud.dataproc.v1beta2.JobScheduling.Builder subBuilder = null; + if (scheduling_ != null) { + subBuilder = scheduling_.toBuilder(); + } + scheduling_ = input.readMessage(com.google.cloud.dataproc.v1beta2.JobScheduling.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(scheduling_); + scheduling_ = subBuilder.buildPartial(); + 
} + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000400) == 0x00000400)) { + yarnApplications_ = java.util.Collections.unmodifiableList(yarnApplications_); + } + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + statusHistory_ = java.util.Collections.unmodifiableList(statusHistory_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_Job_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 18: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_Job_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.Job.class, com.google.cloud.dataproc.v1beta2.Job.Builder.class); + } + + private int bitField0_; + private int typeJobCase_ = 0; + private java.lang.Object typeJob_; + public enum TypeJobCase + implements com.google.protobuf.Internal.EnumLite { + HADOOP_JOB(3), + SPARK_JOB(4), + PYSPARK_JOB(5), + HIVE_JOB(6), + PIG_JOB(7), + SPARK_SQL_JOB(12), + TYPEJOB_NOT_SET(0); + private final int value; + private TypeJobCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TypeJobCase valueOf(int value) { + return forNumber(value); + } + + public static TypeJobCase forNumber(int value) { + switch (value) { + case 3: return HADOOP_JOB; + case 4: return SPARK_JOB; + case 5: return PYSPARK_JOB; + case 6: return HIVE_JOB; + case 7: return PIG_JOB; + case 12: return SPARK_SQL_JOB; + case 0: return TYPEJOB_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public TypeJobCase + getTypeJobCase() { + return TypeJobCase.forNumber( + typeJobCase_); + } + + public static final int REFERENCE_FIELD_NUMBER = 1; + private com.google.cloud.dataproc.v1beta2.JobReference reference_; + /** + *
+   * Optional. The fully qualified reference to the job, which can be used to
+   * obtain the equivalent REST path of the job resource. If this property
+   * is not specified when a job is created, the server generates a
+   * <code>job_id</code>.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public boolean hasReference() { + return reference_ != null; + } + /** + *
+   * Optional. The fully qualified reference to the job, which can be used to
+   * obtain the equivalent REST path of the job resource. If this property
+   * is not specified when a job is created, the server generates a
+   * <code>job_id</code>.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobReference getReference() { + return reference_ == null ? com.google.cloud.dataproc.v1beta2.JobReference.getDefaultInstance() : reference_; + } + /** + *
+   * Optional. The fully qualified reference to the job, which can be used to
+   * obtain the equivalent REST path of the job resource. If this property
+   * is not specified when a job is created, the server generates a
+   * <code>job_id</code>.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobReferenceOrBuilder getReferenceOrBuilder() { + return getReference(); + } + + public static final int PLACEMENT_FIELD_NUMBER = 2; + private com.google.cloud.dataproc.v1beta2.JobPlacement placement_; + /** + *
+   * Required. Job information, including how, when, and where to
+   * run the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public boolean hasPlacement() { + return placement_ != null; + } + /** + *
+   * Required. Job information, including how, when, and where to
+   * run the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public com.google.cloud.dataproc.v1beta2.JobPlacement getPlacement() { + return placement_ == null ? com.google.cloud.dataproc.v1beta2.JobPlacement.getDefaultInstance() : placement_; + } + /** + *
+   * Required. Job information, including how, when, and where to
+   * run the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public com.google.cloud.dataproc.v1beta2.JobPlacementOrBuilder getPlacementOrBuilder() { + return getPlacement(); + } + + public static final int HADOOP_JOB_FIELD_NUMBER = 3; + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public boolean hasHadoopJob() { + return typeJobCase_ == 3; + } + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJob getHadoopJob() { + if (typeJobCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder getHadoopJobOrBuilder() { + if (typeJobCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + + public static final int SPARK_JOB_FIELD_NUMBER = 4; + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public boolean hasSparkJob() { + return typeJobCase_ == 4; + } + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.SparkJob getSparkJob() { + if (typeJobCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder getSparkJobOrBuilder() { + if (typeJobCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + + public static final int PYSPARK_JOB_FIELD_NUMBER = 5; + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public boolean hasPysparkJob() { + return typeJobCase_ == 5; + } + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJob getPysparkJob() { + if (typeJobCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder getPysparkJobOrBuilder() { + if (typeJobCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + + public static final int HIVE_JOB_FIELD_NUMBER = 6; + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public boolean hasHiveJob() { + return typeJobCase_ == 6; + } + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.HiveJob getHiveJob() { + if (typeJobCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder getHiveJobOrBuilder() { + if (typeJobCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + + public static final int PIG_JOB_FIELD_NUMBER = 7; + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public boolean hasPigJob() { + return typeJobCase_ == 7; + } + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.PigJob getPigJob() { + if (typeJobCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.PigJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.PigJobOrBuilder getPigJobOrBuilder() { + if (typeJobCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.PigJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + + public static final int SPARK_SQL_JOB_FIELD_NUMBER = 12; + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public boolean hasSparkSqlJob() { + return typeJobCase_ == 12; + } + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJob getSparkSqlJob() { + if (typeJobCase_ == 12) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { + if (typeJobCase_ == 12) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + + public static final int STATUS_FIELD_NUMBER = 8; + private com.google.cloud.dataproc.v1beta2.JobStatus status_; + /** + *
+   * Output only. The job status. Additional application-specific
+   * status information may be contained in the <code>type_job</code>
+   * and <code>yarn_applications</code> fields.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public boolean hasStatus() { + return status_ != null; + } + /** + *
+   * Output only. The job status. Additional application-specific
+   * status information may be contained in the <code>type_job</code>
+   * and <code>yarn_applications</code> fields.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus getStatus() { + return status_ == null ? com.google.cloud.dataproc.v1beta2.JobStatus.getDefaultInstance() : status_; + } + /** + *
+   * Output only. The job status. Additional application-specific
+   * status information may be contained in the <code>type_job</code>
+   * and <code>yarn_applications</code> fields.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusOrBuilder() { + return getStatus(); + } + + public static final int STATUS_HISTORY_FIELD_NUMBER = 13; + private java.util.List statusHistory_; + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public java.util.List getStatusHistoryList() { + return statusHistory_; + } + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public java.util.List + getStatusHistoryOrBuilderList() { + return statusHistory_; + } + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public int getStatusHistoryCount() { + return statusHistory_.size(); + } + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus getStatusHistory(int index) { + return statusHistory_.get(index); + } + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusHistoryOrBuilder( + int index) { + return statusHistory_.get(index); + } + + public static final int YARN_APPLICATIONS_FIELD_NUMBER = 9; + private java.util.List yarnApplications_; + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public java.util.List getYarnApplicationsList() { + return yarnApplications_; + } + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public java.util.List + getYarnApplicationsOrBuilderList() { + return yarnApplications_; + } + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public int getYarnApplicationsCount() { + return yarnApplications_.size(); + } + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplication getYarnApplications(int index) { + return yarnApplications_.get(index); + } + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOrBuilder( + int index) { + return yarnApplications_.get(index); + } + + public static final int DRIVER_OUTPUT_RESOURCE_URI_FIELD_NUMBER = 17; + private volatile java.lang.Object driverOutputResourceUri_; + /** + *
+   * Output only. A URI pointing to the location of the stdout of the job's
+   * driver program.
+   * 
+ * + * string driver_output_resource_uri = 17; + */ + public java.lang.String getDriverOutputResourceUri() { + java.lang.Object ref = driverOutputResourceUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + driverOutputResourceUri_ = s; + return s; + } + } + /** + *
+   * Output only. A URI pointing to the location of the stdout of the job's
+   * driver program.
+   * 
+ * + * string driver_output_resource_uri = 17; + */ + public com.google.protobuf.ByteString + getDriverOutputResourceUriBytes() { + java.lang.Object ref = driverOutputResourceUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + driverOutputResourceUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DRIVER_CONTROL_FILES_URI_FIELD_NUMBER = 15; + private volatile java.lang.Object driverControlFilesUri_; + /** + *
+   * Output only. If present, the location of miscellaneous control files
+   * which may be used as part of job setup and handling. If not present,
+   * control files may be placed in the same location as `driver_output_uri`.
+   * 
+ * + * string driver_control_files_uri = 15; + */ + public java.lang.String getDriverControlFilesUri() { + java.lang.Object ref = driverControlFilesUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + driverControlFilesUri_ = s; + return s; + } + } + /** + *
+   * Output only. If present, the location of miscellaneous control files
+   * which may be used as part of job setup and handling. If not present,
+   * control files may be placed in the same location as `driver_output_uri`.
+   * 
+ * + * string driver_control_files_uri = 15; + */ + public com.google.protobuf.ByteString + getDriverControlFilesUriBytes() { + java.lang.Object ref = driverControlFilesUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + driverControlFilesUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LABELS_FIELD_NUMBER = 18; + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_Job_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
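+   * For example (illustrative only; `job` is a hypothetical Job instance),
+   * a label can be read defensively:
+   * <pre>
+   * String env = job.getLabelsOrDefault("environment", "unset");
+   * </pre>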
+   * 
+ * + * map<string, string> labels = 18; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int SCHEDULING_FIELD_NUMBER = 20; + private com.google.cloud.dataproc.v1beta2.JobScheduling scheduling_; + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public boolean hasScheduling() { + return scheduling_ != null; + } + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public com.google.cloud.dataproc.v1beta2.JobScheduling getScheduling() { + return scheduling_ == null ? com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance() : scheduling_; + } + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder getSchedulingOrBuilder() { + return getScheduling(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (reference_ != null) { + output.writeMessage(1, getReference()); + } + if (placement_ != null) { + output.writeMessage(2, getPlacement()); + } + if (typeJobCase_ == 3) { + output.writeMessage(3, (com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_); + } + if (typeJobCase_ == 4) { + output.writeMessage(4, (com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_); + } + if (typeJobCase_ == 5) { + output.writeMessage(5, (com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_); + } + if (typeJobCase_ == 6) { + output.writeMessage(6, (com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_); + } + if (typeJobCase_ == 7) { + output.writeMessage(7, (com.google.cloud.dataproc.v1beta2.PigJob) typeJob_); + } + if (status_ != null) { + output.writeMessage(8, getStatus()); + } + for (int i = 0; i < yarnApplications_.size(); i++) { + output.writeMessage(9, yarnApplications_.get(i)); + } + if (typeJobCase_ == 12) { + output.writeMessage(12, (com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_); + } + for (int i = 0; i < statusHistory_.size(); i++) { + output.writeMessage(13, statusHistory_.get(i)); + } + if (!getDriverControlFilesUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 15, driverControlFilesUri_); + } + if (!getDriverOutputResourceUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 17, driverOutputResourceUri_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetLabels(), + LabelsDefaultEntryHolder.defaultEntry, + 18); + if (scheduling_ != null) { + output.writeMessage(20, getScheduling()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (reference_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getReference()); + } + if (placement_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getPlacement()); + } + if (typeJobCase_ == 3) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, (com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_); + } + if (typeJobCase_ == 4) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, (com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_); + } + if (typeJobCase_ == 5) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, (com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_); + } + if (typeJobCase_ == 6) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, (com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_); + } + if (typeJobCase_ == 7) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, (com.google.cloud.dataproc.v1beta2.PigJob) typeJob_); + } + if (status_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, 
getStatus()); + } + for (int i = 0; i < yarnApplications_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, yarnApplications_.get(i)); + } + if (typeJobCase_ == 12) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(12, (com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_); + } + for (int i = 0; i < statusHistory_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(13, statusHistory_.get(i)); + } + if (!getDriverControlFilesUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(15, driverControlFilesUri_); + } + if (!getDriverOutputResourceUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(17, driverOutputResourceUri_); + } + for (java.util.Map.Entry entry + : internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry + labels__ = LabelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(18, labels__); + } + if (scheduling_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(20, getScheduling()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.Job)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.Job other = (com.google.cloud.dataproc.v1beta2.Job) obj; + + boolean result = true; + result = result && (hasReference() == other.hasReference()); + if (hasReference()) { + result = result && getReference() + .equals(other.getReference()); + } + result = result && (hasPlacement() == other.hasPlacement()); + if (hasPlacement()) { + result = result && getPlacement() + .equals(other.getPlacement()); + } + result = result && (hasStatus() == other.hasStatus()); + if (hasStatus()) { + result = result && getStatus() + .equals(other.getStatus()); + } + result = result && getStatusHistoryList() + .equals(other.getStatusHistoryList()); + result = result && getYarnApplicationsList() + .equals(other.getYarnApplicationsList()); + result = result && getDriverOutputResourceUri() + .equals(other.getDriverOutputResourceUri()); + result = result && getDriverControlFilesUri() + .equals(other.getDriverControlFilesUri()); + result = result && internalGetLabels().equals( + other.internalGetLabels()); + result = result && (hasScheduling() == other.hasScheduling()); + if (hasScheduling()) { + result = result && getScheduling() + .equals(other.getScheduling()); + } + result = result && getTypeJobCase().equals( + other.getTypeJobCase()); + if (!result) return false; + switch (typeJobCase_) { + case 3: + result = result && getHadoopJob() + .equals(other.getHadoopJob()); + break; + case 4: + result = result && getSparkJob() + .equals(other.getSparkJob()); + break; + case 5: + result = result && getPysparkJob() + .equals(other.getPysparkJob()); + break; + case 6: + result = result && getHiveJob() + .equals(other.getHiveJob()); + break; + case 7: + result = result && getPigJob() + .equals(other.getPigJob()); + break; + case 12: + result = result && getSparkSqlJob() + .equals(other.getSparkSqlJob()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + 
public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReference()) { + hash = (37 * hash) + REFERENCE_FIELD_NUMBER; + hash = (53 * hash) + getReference().hashCode(); + } + if (hasPlacement()) { + hash = (37 * hash) + PLACEMENT_FIELD_NUMBER; + hash = (53 * hash) + getPlacement().hashCode(); + } + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (getStatusHistoryCount() > 0) { + hash = (37 * hash) + STATUS_HISTORY_FIELD_NUMBER; + hash = (53 * hash) + getStatusHistoryList().hashCode(); + } + if (getYarnApplicationsCount() > 0) { + hash = (37 * hash) + YARN_APPLICATIONS_FIELD_NUMBER; + hash = (53 * hash) + getYarnApplicationsList().hashCode(); + } + hash = (37 * hash) + DRIVER_OUTPUT_RESOURCE_URI_FIELD_NUMBER; + hash = (53 * hash) + getDriverOutputResourceUri().hashCode(); + hash = (37 * hash) + DRIVER_CONTROL_FILES_URI_FIELD_NUMBER; + hash = (53 * hash) + getDriverControlFilesUri().hashCode(); + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (hasScheduling()) { + hash = (37 * hash) + SCHEDULING_FIELD_NUMBER; + hash = (53 * hash) + getScheduling().hashCode(); + } + switch (typeJobCase_) { + case 3: + hash = (37 * hash) + HADOOP_JOB_FIELD_NUMBER; + hash = (53 * hash) + getHadoopJob().hashCode(); + break; + case 4: + hash = (37 * hash) + SPARK_JOB_FIELD_NUMBER; + hash = (53 * hash) + getSparkJob().hashCode(); + break; + case 5: + hash = (37 * hash) + PYSPARK_JOB_FIELD_NUMBER; + hash = (53 * hash) + getPysparkJob().hashCode(); + break; + case 6: + hash = (37 * hash) + HIVE_JOB_FIELD_NUMBER; + hash = (53 * hash) + getHiveJob().hashCode(); + break; + case 7: + hash = (37 * hash) + PIG_JOB_FIELD_NUMBER; + hash = (53 * hash) + getPigJob().hashCode(); + break; + case 12: + hash = (37 * hash) + SPARK_SQL_JOB_FIELD_NUMBER; + hash = (53 * hash) + getSparkSqlJob().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Job parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.Job parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.Job parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.Job prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc job resource.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.Job} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.Job) + com.google.cloud.dataproc.v1beta2.JobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_Job_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 18: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 18: + return internalGetMutableLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_Job_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.Job.class, com.google.cloud.dataproc.v1beta2.Job.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.Job.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getStatusHistoryFieldBuilder(); + getYarnApplicationsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (referenceBuilder_ == null) { + reference_ = null; + } else { + reference_ = null; + referenceBuilder_ = null; + } + if (placementBuilder_ == null) { + placement_ = null; + } else { + placement_ = null; + placementBuilder_ = null; + } + if (statusBuilder_ == null) { + status_ = null; + } else { + status_ = null; + statusBuilder_ = null; + } + if (statusHistoryBuilder_ == null) { + statusHistory_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + } else { + statusHistoryBuilder_.clear(); + } + if (yarnApplicationsBuilder_ == null) { + yarnApplications_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); + } else { + yarnApplicationsBuilder_.clear(); + } + driverOutputResourceUri_ = ""; + + driverControlFilesUri_ = ""; + + internalGetMutableLabels().clear(); + if (schedulingBuilder_ == null) { + scheduling_ = null; + } else { + scheduling_ = null; + schedulingBuilder_ = null; + } + typeJobCase_ = 0; + typeJob_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_Job_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.Job getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.Job build() { + com.google.cloud.dataproc.v1beta2.Job result = buildPartial(); + 
if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.Job buildPartial() { + com.google.cloud.dataproc.v1beta2.Job result = new com.google.cloud.dataproc.v1beta2.Job(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (referenceBuilder_ == null) { + result.reference_ = reference_; + } else { + result.reference_ = referenceBuilder_.build(); + } + if (placementBuilder_ == null) { + result.placement_ = placement_; + } else { + result.placement_ = placementBuilder_.build(); + } + if (typeJobCase_ == 3) { + if (hadoopJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = hadoopJobBuilder_.build(); + } + } + if (typeJobCase_ == 4) { + if (sparkJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = sparkJobBuilder_.build(); + } + } + if (typeJobCase_ == 5) { + if (pysparkJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = pysparkJobBuilder_.build(); + } + } + if (typeJobCase_ == 6) { + if (hiveJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = hiveJobBuilder_.build(); + } + } + if (typeJobCase_ == 7) { + if (pigJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = pigJobBuilder_.build(); + } + } + if (typeJobCase_ == 12) { + if (sparkSqlJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = sparkSqlJobBuilder_.build(); + } + } + if (statusBuilder_ == null) { + result.status_ = status_; + } else { + result.status_ = statusBuilder_.build(); + } + if (statusHistoryBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { + statusHistory_ = java.util.Collections.unmodifiableList(statusHistory_); + bitField0_ = (bitField0_ & ~0x00000200); + } + result.statusHistory_ = statusHistory_; + } else { + result.statusHistory_ = statusHistoryBuilder_.build(); + } + if (yarnApplicationsBuilder_ == null) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { + yarnApplications_ = java.util.Collections.unmodifiableList(yarnApplications_); + bitField0_ = (bitField0_ & ~0x00000400); + } + result.yarnApplications_ = yarnApplications_; + } else { + result.yarnApplications_ = yarnApplicationsBuilder_.build(); + } + result.driverOutputResourceUri_ = driverOutputResourceUri_; + result.driverControlFilesUri_ = driverControlFilesUri_; + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + if (schedulingBuilder_ == null) { + result.scheduling_ = scheduling_; + } else { + result.scheduling_ = schedulingBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + result.typeJobCase_ = typeJobCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return 
(Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.Job) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.Job)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.Job other) { + if (other == com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance()) return this; + if (other.hasReference()) { + mergeReference(other.getReference()); + } + if (other.hasPlacement()) { + mergePlacement(other.getPlacement()); + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + if (statusHistoryBuilder_ == null) { + if (!other.statusHistory_.isEmpty()) { + if (statusHistory_.isEmpty()) { + statusHistory_ = other.statusHistory_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensureStatusHistoryIsMutable(); + statusHistory_.addAll(other.statusHistory_); + } + onChanged(); + } + } else { + if (!other.statusHistory_.isEmpty()) { + if (statusHistoryBuilder_.isEmpty()) { + statusHistoryBuilder_.dispose(); + statusHistoryBuilder_ = null; + statusHistory_ = other.statusHistory_; + bitField0_ = (bitField0_ & ~0x00000200); + statusHistoryBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getStatusHistoryFieldBuilder() : null; + } else { + statusHistoryBuilder_.addAllMessages(other.statusHistory_); + } + } + } + if (yarnApplicationsBuilder_ == null) { + if (!other.yarnApplications_.isEmpty()) { + if (yarnApplications_.isEmpty()) { + yarnApplications_ = other.yarnApplications_; + bitField0_ = (bitField0_ & ~0x00000400); + } else { + ensureYarnApplicationsIsMutable(); + yarnApplications_.addAll(other.yarnApplications_); + } + onChanged(); + } + } else { + if (!other.yarnApplications_.isEmpty()) { + if (yarnApplicationsBuilder_.isEmpty()) { + yarnApplicationsBuilder_.dispose(); + yarnApplicationsBuilder_ = null; + yarnApplications_ = other.yarnApplications_; + bitField0_ = (bitField0_ & ~0x00000400); + yarnApplicationsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getYarnApplicationsFieldBuilder() : null; + } else { + yarnApplicationsBuilder_.addAllMessages(other.yarnApplications_); + } + } + } + if (!other.getDriverOutputResourceUri().isEmpty()) { + driverOutputResourceUri_ = other.driverOutputResourceUri_; + onChanged(); + } + if (!other.getDriverControlFilesUri().isEmpty()) { + driverControlFilesUri_ = other.driverControlFilesUri_; + onChanged(); + } + internalGetMutableLabels().mergeFrom( + other.internalGetLabels()); + if (other.hasScheduling()) { + mergeScheduling(other.getScheduling()); + } + switch (other.getTypeJobCase()) { + case HADOOP_JOB: { + mergeHadoopJob(other.getHadoopJob()); + break; + } + case SPARK_JOB: { + mergeSparkJob(other.getSparkJob()); + break; + } + case PYSPARK_JOB: { + mergePysparkJob(other.getPysparkJob()); + break; + } + case HIVE_JOB: { + mergeHiveJob(other.getHiveJob()); + break; + } + case PIG_JOB: { + mergePigJob(other.getPigJob()); + break; + } + case SPARK_SQL_JOB: { + mergeSparkSqlJob(other.getSparkSqlJob()); + break; + } + case TYPEJOB_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.Job parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.Job) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int typeJobCase_ = 0; + private java.lang.Object typeJob_; + public TypeJobCase + getTypeJobCase() { + return TypeJobCase.forNumber( + typeJobCase_); + } + + public Builder clearTypeJob() { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.cloud.dataproc.v1beta2.JobReference reference_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobReference, com.google.cloud.dataproc.v1beta2.JobReference.Builder, com.google.cloud.dataproc.v1beta2.JobReferenceOrBuilder> referenceBuilder_; + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public boolean hasReference() { + return referenceBuilder_ != null || reference_ != null; + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobReference getReference() { + if (referenceBuilder_ == null) { + return reference_ == null ? com.google.cloud.dataproc.v1beta2.JobReference.getDefaultInstance() : reference_; + } else { + return referenceBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
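+     * A minimal usage sketch (illustrative only, not generated API text; the
+     * job id below is an assumed placeholder):
+     *
+     *   Job.Builder builder = Job.newBuilder();
+     *   builder.setReference(
+     *       JobReference.newBuilder().setJobId("example-job"));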
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public Builder setReference(com.google.cloud.dataproc.v1beta2.JobReference value) { + if (referenceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + reference_ = value; + onChanged(); + } else { + referenceBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public Builder setReference( + com.google.cloud.dataproc.v1beta2.JobReference.Builder builderForValue) { + if (referenceBuilder_ == null) { + reference_ = builderForValue.build(); + onChanged(); + } else { + referenceBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public Builder mergeReference(com.google.cloud.dataproc.v1beta2.JobReference value) { + if (referenceBuilder_ == null) { + if (reference_ != null) { + reference_ = + com.google.cloud.dataproc.v1beta2.JobReference.newBuilder(reference_).mergeFrom(value).buildPartial(); + } else { + reference_ = value; + } + onChanged(); + } else { + referenceBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public Builder clearReference() { + if (referenceBuilder_ == null) { + reference_ = null; + onChanged(); + } else { + reference_ = null; + referenceBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobReference.Builder getReferenceBuilder() { + + onChanged(); + return getReferenceFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobReferenceOrBuilder getReferenceOrBuilder() { + if (referenceBuilder_ != null) { + return referenceBuilder_.getMessageOrBuilder(); + } else { + return reference_ == null ? + com.google.cloud.dataproc.v1beta2.JobReference.getDefaultInstance() : reference_; + } + } + /** + *
+     * Optional. The fully qualified reference to the job, which can be used to
+     * obtain the equivalent REST path of the job resource. If this property
+     * is not specified when a job is created, the server generates a
+     * <code>job_id</code>.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobReference, com.google.cloud.dataproc.v1beta2.JobReference.Builder, com.google.cloud.dataproc.v1beta2.JobReferenceOrBuilder> + getReferenceFieldBuilder() { + if (referenceBuilder_ == null) { + referenceBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobReference, com.google.cloud.dataproc.v1beta2.JobReference.Builder, com.google.cloud.dataproc.v1beta2.JobReferenceOrBuilder>( + getReference(), + getParentForChildren(), + isClean()); + reference_ = null; + } + return referenceBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.JobPlacement placement_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobPlacement, com.google.cloud.dataproc.v1beta2.JobPlacement.Builder, com.google.cloud.dataproc.v1beta2.JobPlacementOrBuilder> placementBuilder_; + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public boolean hasPlacement() { + return placementBuilder_ != null || placement_ != null; + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public com.google.cloud.dataproc.v1beta2.JobPlacement getPlacement() { + if (placementBuilder_ == null) { + return placement_ == null ? com.google.cloud.dataproc.v1beta2.JobPlacement.getDefaultInstance() : placement_; + } else { + return placementBuilder_.getMessage(); + } + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
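+     * Illustrative sketch (the cluster name is a placeholder, not a value
+     * from this patch):
+     *
+     *   builder.setPlacement(
+     *       JobPlacement.newBuilder().setClusterName("example-cluster"));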
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public Builder setPlacement(com.google.cloud.dataproc.v1beta2.JobPlacement value) { + if (placementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + placement_ = value; + onChanged(); + } else { + placementBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public Builder setPlacement( + com.google.cloud.dataproc.v1beta2.JobPlacement.Builder builderForValue) { + if (placementBuilder_ == null) { + placement_ = builderForValue.build(); + onChanged(); + } else { + placementBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public Builder mergePlacement(com.google.cloud.dataproc.v1beta2.JobPlacement value) { + if (placementBuilder_ == null) { + if (placement_ != null) { + placement_ = + com.google.cloud.dataproc.v1beta2.JobPlacement.newBuilder(placement_).mergeFrom(value).buildPartial(); + } else { + placement_ = value; + } + onChanged(); + } else { + placementBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public Builder clearPlacement() { + if (placementBuilder_ == null) { + placement_ = null; + onChanged(); + } else { + placement_ = null; + placementBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public com.google.cloud.dataproc.v1beta2.JobPlacement.Builder getPlacementBuilder() { + + onChanged(); + return getPlacementFieldBuilder().getBuilder(); + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + public com.google.cloud.dataproc.v1beta2.JobPlacementOrBuilder getPlacementOrBuilder() { + if (placementBuilder_ != null) { + return placementBuilder_.getMessageOrBuilder(); + } else { + return placement_ == null ? + com.google.cloud.dataproc.v1beta2.JobPlacement.getDefaultInstance() : placement_; + } + } + /** + *
+     * Required. Job information, including how, when, and where to
+     * run the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobPlacement, com.google.cloud.dataproc.v1beta2.JobPlacement.Builder, com.google.cloud.dataproc.v1beta2.JobPlacementOrBuilder> + getPlacementFieldBuilder() { + if (placementBuilder_ == null) { + placementBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobPlacement, com.google.cloud.dataproc.v1beta2.JobPlacement.Builder, com.google.cloud.dataproc.v1beta2.JobPlacementOrBuilder>( + getPlacement(), + getParentForChildren(), + isClean()); + placement_ = null; + } + return placementBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HadoopJob, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder, com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder> hadoopJobBuilder_; + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public boolean hasHadoopJob() { + return typeJobCase_ == 3; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJob getHadoopJob() { + if (hadoopJobBuilder_ == null) { + if (typeJobCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 3) { + return hadoopJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hadoop job.
+     * 
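+     * Because hadoop_job is one case of the type_job oneof, setting it
+     * replaces any previously set job type. Sketch (the jar URI is an
+     * assumed placeholder):
+     *
+     *   builder.setHadoopJob(
+     *       HadoopJob.newBuilder().setMainJarFileUri("gs://bucket/app.jar"));
+     *   // getTypeJobCase() now returns TypeJobCase.HADOOP_JOB.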
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public Builder setHadoopJob(com.google.cloud.dataproc.v1beta2.HadoopJob value) { + if (hadoopJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + hadoopJobBuilder_.setMessage(value); + } + typeJobCase_ = 3; + return this; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public Builder setHadoopJob( + com.google.cloud.dataproc.v1beta2.HadoopJob.Builder builderForValue) { + if (hadoopJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + hadoopJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 3; + return this; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3;
+     */
+    public Builder mergeHadoopJob(com.google.cloud.dataproc.v1beta2.HadoopJob value) {
+      if (hadoopJobBuilder_ == null) {
+        if (typeJobCase_ == 3 &&
+            typeJob_ != com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance()) {
+          typeJob_ = com.google.cloud.dataproc.v1beta2.HadoopJob.newBuilder((com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_)
+              .mergeFrom(value).buildPartial();
+        } else {
+          typeJob_ = value;
+        }
+        onChanged();
+      } else {
+        if (typeJobCase_ == 3) {
+          hadoopJobBuilder_.mergeFrom(value);
+        } else {
+          // Only replace when the oneof held a different type; an
+          // unconditional setMessage here would discard the merge above.
+          hadoopJobBuilder_.setMessage(value);
+        }
+      }
+      typeJobCase_ = 3;
+      return this;
+    }
+    /**
+     *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public Builder clearHadoopJob() { + if (hadoopJobBuilder_ == null) { + if (typeJobCase_ == 3) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 3) { + typeJobCase_ = 0; + typeJob_ = null; + } + hadoopJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJob.Builder getHadoopJobBuilder() { + return getHadoopJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder getHadoopJobOrBuilder() { + if ((typeJobCase_ == 3) && (hadoopJobBuilder_ != null)) { + return hadoopJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HadoopJob, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder, com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder> + getHadoopJobFieldBuilder() { + if (hadoopJobBuilder_ == null) { + if (!(typeJobCase_ == 3)) { + typeJob_ = com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + hadoopJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HadoopJob, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder, com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.HadoopJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 3; + onChanged();; + return hadoopJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkJob, com.google.cloud.dataproc.v1beta2.SparkJob.Builder, com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder> sparkJobBuilder_; + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public boolean hasSparkJob() { + return typeJobCase_ == 4; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.SparkJob getSparkJob() { + if (sparkJobBuilder_ == null) { + if (typeJobCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 4) { + return sparkJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public Builder setSparkJob(com.google.cloud.dataproc.v1beta2.SparkJob value) { + if (sparkJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + sparkJobBuilder_.setMessage(value); + } + typeJobCase_ = 4; + return this; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public Builder setSparkJob( + com.google.cloud.dataproc.v1beta2.SparkJob.Builder builderForValue) { + if (sparkJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + sparkJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 4; + return this; + } + /** + *
+     * Job is a Spark job.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4;
+     */
+    public Builder mergeSparkJob(com.google.cloud.dataproc.v1beta2.SparkJob value) {
+      if (sparkJobBuilder_ == null) {
+        if (typeJobCase_ == 4 &&
+            typeJob_ != com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance()) {
+          typeJob_ = com.google.cloud.dataproc.v1beta2.SparkJob.newBuilder((com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_)
+              .mergeFrom(value).buildPartial();
+        } else {
+          typeJob_ = value;
+        }
+        onChanged();
+      } else {
+        if (typeJobCase_ == 4) {
+          sparkJobBuilder_.mergeFrom(value);
+        } else {
+          // Only replace when the oneof held a different type.
+          sparkJobBuilder_.setMessage(value);
+        }
+      }
+      typeJobCase_ = 4;
+      return this;
+    }
+    /**
+     *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public Builder clearSparkJob() { + if (sparkJobBuilder_ == null) { + if (typeJobCase_ == 4) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 4) { + typeJobCase_ = 0; + typeJob_ = null; + } + sparkJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.SparkJob.Builder getSparkJobBuilder() { + return getSparkJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder getSparkJobOrBuilder() { + if ((typeJobCase_ == 4) && (sparkJobBuilder_ != null)) { + return sparkJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkJob, com.google.cloud.dataproc.v1beta2.SparkJob.Builder, com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder> + getSparkJobFieldBuilder() { + if (sparkJobBuilder_ == null) { + if (!(typeJobCase_ == 4)) { + typeJob_ = com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + sparkJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkJob, com.google.cloud.dataproc.v1beta2.SparkJob.Builder, com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.SparkJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 4; + onChanged();; + return sparkJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PySparkJob, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder, com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder> pysparkJobBuilder_; + /** + *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public boolean hasPysparkJob() { + return typeJobCase_ == 5; + } + /** + *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJob getPysparkJob() { + if (pysparkJobBuilder_ == null) { + if (typeJobCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 5) { + return pysparkJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public Builder setPysparkJob(com.google.cloud.dataproc.v1beta2.PySparkJob value) { + if (pysparkJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + pysparkJobBuilder_.setMessage(value); + } + typeJobCase_ = 5; + return this; + } + /** + *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public Builder setPysparkJob( + com.google.cloud.dataproc.v1beta2.PySparkJob.Builder builderForValue) { + if (pysparkJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + pysparkJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 5; + return this; + } + /** + *
+     * Job is a Pyspark job.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5;
+     */
+    public Builder mergePysparkJob(com.google.cloud.dataproc.v1beta2.PySparkJob value) {
+      if (pysparkJobBuilder_ == null) {
+        if (typeJobCase_ == 5 &&
+            typeJob_ != com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance()) {
+          typeJob_ = com.google.cloud.dataproc.v1beta2.PySparkJob.newBuilder((com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_)
+              .mergeFrom(value).buildPartial();
+        } else {
+          typeJob_ = value;
+        }
+        onChanged();
+      } else {
+        if (typeJobCase_ == 5) {
+          pysparkJobBuilder_.mergeFrom(value);
+        } else {
+          // Only replace when the oneof held a different type.
+          pysparkJobBuilder_.setMessage(value);
+        }
+      }
+      typeJobCase_ = 5;
+      return this;
+    }
+    /**
+     *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public Builder clearPysparkJob() { + if (pysparkJobBuilder_ == null) { + if (typeJobCase_ == 5) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 5) { + typeJobCase_ = 0; + typeJob_ = null; + } + pysparkJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJob.Builder getPysparkJobBuilder() { + return getPysparkJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder getPysparkJobOrBuilder() { + if ((typeJobCase_ == 5) && (pysparkJobBuilder_ != null)) { + return pysparkJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Pyspark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PySparkJob, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder, com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder> + getPysparkJobFieldBuilder() { + if (pysparkJobBuilder_ == null) { + if (!(typeJobCase_ == 5)) { + typeJob_ = com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + pysparkJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PySparkJob, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder, com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.PySparkJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 5; + onChanged();; + return pysparkJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HiveJob, com.google.cloud.dataproc.v1beta2.HiveJob.Builder, com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder> hiveJobBuilder_; + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public boolean hasHiveJob() { + return typeJobCase_ == 6; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.HiveJob getHiveJob() { + if (hiveJobBuilder_ == null) { + if (typeJobCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 6) { + return hiveJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public Builder setHiveJob(com.google.cloud.dataproc.v1beta2.HiveJob value) { + if (hiveJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + hiveJobBuilder_.setMessage(value); + } + typeJobCase_ = 6; + return this; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public Builder setHiveJob( + com.google.cloud.dataproc.v1beta2.HiveJob.Builder builderForValue) { + if (hiveJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + hiveJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 6; + return this; + } + /** + *
+     * Job is a Hive job.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6;
+     */
+    public Builder mergeHiveJob(com.google.cloud.dataproc.v1beta2.HiveJob value) {
+      if (hiveJobBuilder_ == null) {
+        if (typeJobCase_ == 6 &&
+            typeJob_ != com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance()) {
+          typeJob_ = com.google.cloud.dataproc.v1beta2.HiveJob.newBuilder((com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_)
+              .mergeFrom(value).buildPartial();
+        } else {
+          typeJob_ = value;
+        }
+        onChanged();
+      } else {
+        if (typeJobCase_ == 6) {
+          hiveJobBuilder_.mergeFrom(value);
+        } else {
+          // Only replace when the oneof held a different type.
+          hiveJobBuilder_.setMessage(value);
+        }
+      }
+      typeJobCase_ = 6;
+      return this;
+    }
+    /**
+     *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public Builder clearHiveJob() { + if (hiveJobBuilder_ == null) { + if (typeJobCase_ == 6) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 6) { + typeJobCase_ = 0; + typeJob_ = null; + } + hiveJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.HiveJob.Builder getHiveJobBuilder() { + return getHiveJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder getHiveJobOrBuilder() { + if ((typeJobCase_ == 6) && (hiveJobBuilder_ != null)) { + return hiveJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HiveJob, com.google.cloud.dataproc.v1beta2.HiveJob.Builder, com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder> + getHiveJobFieldBuilder() { + if (hiveJobBuilder_ == null) { + if (!(typeJobCase_ == 6)) { + typeJob_ = com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + hiveJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HiveJob, com.google.cloud.dataproc.v1beta2.HiveJob.Builder, com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.HiveJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 6; + onChanged();; + return hiveJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PigJob, com.google.cloud.dataproc.v1beta2.PigJob.Builder, com.google.cloud.dataproc.v1beta2.PigJobOrBuilder> pigJobBuilder_; + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public boolean hasPigJob() { + return typeJobCase_ == 7; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.PigJob getPigJob() { + if (pigJobBuilder_ == null) { + if (typeJobCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.PigJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 7) { + return pigJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public Builder setPigJob(com.google.cloud.dataproc.v1beta2.PigJob value) { + if (pigJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + pigJobBuilder_.setMessage(value); + } + typeJobCase_ = 7; + return this; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public Builder setPigJob( + com.google.cloud.dataproc.v1beta2.PigJob.Builder builderForValue) { + if (pigJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + pigJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 7; + return this; + } + /** + *
+     * Job is a Pig job.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7;
+     */
+    public Builder mergePigJob(com.google.cloud.dataproc.v1beta2.PigJob value) {
+      if (pigJobBuilder_ == null) {
+        if (typeJobCase_ == 7 &&
+            typeJob_ != com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance()) {
+          typeJob_ = com.google.cloud.dataproc.v1beta2.PigJob.newBuilder((com.google.cloud.dataproc.v1beta2.PigJob) typeJob_)
+              .mergeFrom(value).buildPartial();
+        } else {
+          typeJob_ = value;
+        }
+        onChanged();
+      } else {
+        if (typeJobCase_ == 7) {
+          pigJobBuilder_.mergeFrom(value);
+        } else {
+          // Only replace when the oneof held a different type.
+          pigJobBuilder_.setMessage(value);
+        }
+      }
+      typeJobCase_ = 7;
+      return this;
+    }
+    /**
+     *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public Builder clearPigJob() { + if (pigJobBuilder_ == null) { + if (typeJobCase_ == 7) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 7) { + typeJobCase_ = 0; + typeJob_ = null; + } + pigJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.PigJob.Builder getPigJobBuilder() { + return getPigJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.PigJobOrBuilder getPigJobOrBuilder() { + if ((typeJobCase_ == 7) && (pigJobBuilder_ != null)) { + return pigJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.PigJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PigJob, com.google.cloud.dataproc.v1beta2.PigJob.Builder, com.google.cloud.dataproc.v1beta2.PigJobOrBuilder> + getPigJobFieldBuilder() { + if (pigJobBuilder_ == null) { + if (!(typeJobCase_ == 7)) { + typeJob_ = com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + pigJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PigJob, com.google.cloud.dataproc.v1beta2.PigJob.Builder, com.google.cloud.dataproc.v1beta2.PigJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.PigJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 7; + onChanged();; + return pigJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkSqlJob, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder, com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder> sparkSqlJobBuilder_; + /** + *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public boolean hasSparkSqlJob() { + return typeJobCase_ == 12; + } + /** + *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJob getSparkSqlJob() { + if (sparkSqlJobBuilder_ == null) { + if (typeJobCase_ == 12) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 12) { + return sparkSqlJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public Builder setSparkSqlJob(com.google.cloud.dataproc.v1beta2.SparkSqlJob value) { + if (sparkSqlJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + sparkSqlJobBuilder_.setMessage(value); + } + typeJobCase_ = 12; + return this; + } + /** + *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public Builder setSparkSqlJob( + com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder builderForValue) { + if (sparkSqlJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + sparkSqlJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 12; + return this; + } + /** + *
+     * Job is a SparkSql job.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12;
+     */
+    public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1beta2.SparkSqlJob value) {
+      if (sparkSqlJobBuilder_ == null) {
+        if (typeJobCase_ == 12 &&
+            typeJob_ != com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance()) {
+          typeJob_ = com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder((com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_)
+              .mergeFrom(value).buildPartial();
+        } else {
+          typeJob_ = value;
+        }
+        onChanged();
+      } else {
+        if (typeJobCase_ == 12) {
+          sparkSqlJobBuilder_.mergeFrom(value);
+        } else {
+          // Only replace when the oneof held a different type.
+          sparkSqlJobBuilder_.setMessage(value);
+        }
+      }
+      typeJobCase_ = 12;
+      return this;
+    }
+    /**
+     *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public Builder clearSparkSqlJob() { + if (sparkSqlJobBuilder_ == null) { + if (typeJobCase_ == 12) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 12) { + typeJobCase_ = 0; + typeJob_ = null; + } + sparkSqlJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder getSparkSqlJobBuilder() { + return getSparkSqlJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { + if ((typeJobCase_ == 12) && (sparkSqlJobBuilder_ != null)) { + return sparkSqlJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 12) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a SparkSql job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkSqlJob, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder, com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder> + getSparkSqlJobFieldBuilder() { + if (sparkSqlJobBuilder_ == null) { + if (!(typeJobCase_ == 12)) { + typeJob_ = com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + sparkSqlJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkSqlJob, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder, com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.SparkSqlJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 12; + onChanged();; + return sparkSqlJobBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.JobStatus status_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobStatus, com.google.cloud.dataproc.v1beta2.JobStatus.Builder, com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder> statusBuilder_; + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
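+     * Read-side sketch (state values follow the JobStatus.State enum in this
+     * package; DONE is used here purely for illustration):
+     *
+     *   if (job.hasStatus()
+     *       && job.getStatus().getState() == JobStatus.State.DONE) {
+     *     // the job has finished
+     *   }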
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public boolean hasStatus() { + return statusBuilder_ != null || status_ != null; + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.cloud.dataproc.v1beta2.JobStatus.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public Builder setStatus(com.google.cloud.dataproc.v1beta2.JobStatus value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + onChanged(); + } else { + statusBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public Builder setStatus( + com.google.cloud.dataproc.v1beta2.JobStatus.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + onChanged(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public Builder mergeStatus(com.google.cloud.dataproc.v1beta2.JobStatus value) { + if (statusBuilder_ == null) { + if (status_ != null) { + status_ = + com.google.cloud.dataproc.v1beta2.JobStatus.newBuilder(status_).mergeFrom(value).buildPartial(); + } else { + status_ = value; + } + onChanged(); + } else { + statusBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public Builder clearStatus() { + if (statusBuilder_ == null) { + status_ = null; + onChanged(); + } else { + status_ = null; + statusBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.Builder getStatusBuilder() { + + onChanged(); + return getStatusFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + public com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? + com.google.cloud.dataproc.v1beta2.JobStatus.getDefaultInstance() : status_; + } + } + /** + *
+     * Output only. The job status. Additional application-specific
+     * status information may be contained in the <code>type_job</code>
+     * and <code>yarn_applications</code> fields.
+     * 
+     *
+     * .google.cloud.dataproc.v1beta2.JobStatus status = 8;
+     */
+    private com.google.protobuf.SingleFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.JobStatus, com.google.cloud.dataproc.v1beta2.JobStatus.Builder, com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder>
+        getStatusFieldBuilder() {
+      if (statusBuilder_ == null) {
+        statusBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.JobStatus, com.google.cloud.dataproc.v1beta2.JobStatus.Builder, com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder>(
+                getStatus(),
+                getParentForChildren(),
+                isClean());
+        status_ = null;
+      }
+      return statusBuilder_;
+    }
+
+    private java.util.List<com.google.cloud.dataproc.v1beta2.JobStatus> statusHistory_ =
+      java.util.Collections.emptyList();
+    private void ensureStatusHistoryIsMutable() {
+      if (!((bitField0_ & 0x00000200) == 0x00000200)) {
+        statusHistory_ = new java.util.ArrayList<com.google.cloud.dataproc.v1beta2.JobStatus>(statusHistory_);
+        bitField0_ |= 0x00000200;
+      }
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.JobStatus, com.google.cloud.dataproc.v1beta2.JobStatus.Builder, com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder> statusHistoryBuilder_;
+
+    /**
+     *
+     * Output only. The previous job status.
+     * 
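+     * Read-side sketch, given a built Job instance named job (the getters
+     * follow the JobStatus message in this patch):
+     *
+     *   for (JobStatus past : job.getStatusHistoryList()) {
+     *     // e.g. past.getState(), past.getStateStartTime()
+     *   }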
+     *
+     * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13;
+     */
+    public java.util.List<com.google.cloud.dataproc.v1beta2.JobStatus> getStatusHistoryList() {
+      if (statusHistoryBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(statusHistory_);
+      } else {
+        return statusHistoryBuilder_.getMessageList();
+      }
+    }
+    /**
+     *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public int getStatusHistoryCount() { + if (statusHistoryBuilder_ == null) { + return statusHistory_.size(); + } else { + return statusHistoryBuilder_.getCount(); + } + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus getStatusHistory(int index) { + if (statusHistoryBuilder_ == null) { + return statusHistory_.get(index); + } else { + return statusHistoryBuilder_.getMessage(index); + } + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder setStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.JobStatus value) { + if (statusHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatusHistoryIsMutable(); + statusHistory_.set(index, value); + onChanged(); + } else { + statusHistoryBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder setStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.JobStatus.Builder builderForValue) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.set(index, builderForValue.build()); + onChanged(); + } else { + statusHistoryBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder addStatusHistory(com.google.cloud.dataproc.v1beta2.JobStatus value) { + if (statusHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatusHistoryIsMutable(); + statusHistory_.add(value); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder addStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.JobStatus value) { + if (statusHistoryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatusHistoryIsMutable(); + statusHistory_.add(index, value); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder addStatusHistory( + com.google.cloud.dataproc.v1beta2.JobStatus.Builder builderForValue) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.add(builderForValue.build()); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder addStatusHistory( + int index, com.google.cloud.dataproc.v1beta2.JobStatus.Builder builderForValue) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.add(index, builderForValue.build()); + onChanged(); + } else { + statusHistoryBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13;
+     */
+    public Builder addAllStatusHistory(
+        java.lang.Iterable<? extends com.google.cloud.dataproc.v1beta2.JobStatus> values) {
+      if (statusHistoryBuilder_ == null) {
+        ensureStatusHistoryIsMutable();
+        com.google.protobuf.AbstractMessageLite.Builder.addAll(
+            values, statusHistory_);
+        onChanged();
+      } else {
+        statusHistoryBuilder_.addAllMessages(values);
+      }
+      return this;
+    }
+    /**
+     *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder clearStatusHistory() { + if (statusHistoryBuilder_ == null) { + statusHistory_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + } else { + statusHistoryBuilder_.clear(); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public Builder removeStatusHistory(int index) { + if (statusHistoryBuilder_ == null) { + ensureStatusHistoryIsMutable(); + statusHistory_.remove(index); + onChanged(); + } else { + statusHistoryBuilder_.remove(index); + } + return this; + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.Builder getStatusHistoryBuilder( + int index) { + return getStatusHistoryFieldBuilder().getBuilder(index); + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusHistoryOrBuilder( + int index) { + if (statusHistoryBuilder_ == null) { + return statusHistory_.get(index); } else { + return statusHistoryBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Output only. The previous job status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13;
+     */
+    public java.util.List<? extends com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder>
+        getStatusHistoryOrBuilderList() {
+      if (statusHistoryBuilder_ != null) {
+        return statusHistoryBuilder_.getMessageOrBuilderList();
+      } else {
+        return java.util.Collections.unmodifiableList(statusHistory_);
+      }
+    }
+    /**
+     *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.Builder addStatusHistoryBuilder() { + return getStatusHistoryFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.JobStatus.getDefaultInstance()); + } + /** + *
+     * Output only. The previous job status.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.Builder addStatusHistoryBuilder( + int index) { + return getStatusHistoryFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.JobStatus.getDefaultInstance()); + } + /** + *
+     * Output only. The previous job status.
+     * 
+     *
+     * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13;
+     */
+    public java.util.List<com.google.cloud.dataproc.v1beta2.JobStatus.Builder>
+        getStatusHistoryBuilderList() {
+      return getStatusHistoryFieldBuilder().getBuilderList();
+    }
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.JobStatus, com.google.cloud.dataproc.v1beta2.JobStatus.Builder, com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder>
+        getStatusHistoryFieldBuilder() {
+      if (statusHistoryBuilder_ == null) {
+        statusHistoryBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
+            com.google.cloud.dataproc.v1beta2.JobStatus, com.google.cloud.dataproc.v1beta2.JobStatus.Builder, com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder>(
+                statusHistory_,
+                ((bitField0_ & 0x00000200) == 0x00000200),
+                getParentForChildren(),
+                isClean());
+        statusHistory_ = null;
+      }
+      return statusHistoryBuilder_;
+    }
+
+    private java.util.List<com.google.cloud.dataproc.v1beta2.YarnApplication> yarnApplications_ =
+      java.util.Collections.emptyList();
+    private void ensureYarnApplicationsIsMutable() {
+      if (!((bitField0_ & 0x00000400) == 0x00000400)) {
+        yarnApplications_ = new java.util.ArrayList<com.google.cloud.dataproc.v1beta2.YarnApplication>(yarnApplications_);
+        bitField0_ |= 0x00000400;
+      }
+    }
+
+    private com.google.protobuf.RepeatedFieldBuilderV3<
+        com.google.cloud.dataproc.v1beta2.YarnApplication, com.google.cloud.dataproc.v1beta2.YarnApplication.Builder, com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder> yarnApplicationsBuilder_;
+
+    /**
+     *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public java.util.List getYarnApplicationsList() { + if (yarnApplicationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(yarnApplications_); + } else { + return yarnApplicationsBuilder_.getMessageList(); + } + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public int getYarnApplicationsCount() { + if (yarnApplicationsBuilder_ == null) { + return yarnApplications_.size(); + } else { + return yarnApplicationsBuilder_.getCount(); + } + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplication getYarnApplications(int index) { + if (yarnApplicationsBuilder_ == null) { + return yarnApplications_.get(index); + } else { + return yarnApplicationsBuilder_.getMessage(index); + } + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder setYarnApplications( + int index, com.google.cloud.dataproc.v1beta2.YarnApplication value) { + if (yarnApplicationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureYarnApplicationsIsMutable(); + yarnApplications_.set(index, value); + onChanged(); + } else { + yarnApplicationsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder setYarnApplications( + int index, com.google.cloud.dataproc.v1beta2.YarnApplication.Builder builderForValue) { + if (yarnApplicationsBuilder_ == null) { + ensureYarnApplicationsIsMutable(); + yarnApplications_.set(index, builderForValue.build()); + onChanged(); + } else { + yarnApplicationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder addYarnApplications(com.google.cloud.dataproc.v1beta2.YarnApplication value) { + if (yarnApplicationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureYarnApplicationsIsMutable(); + yarnApplications_.add(value); + onChanged(); + } else { + yarnApplicationsBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder addYarnApplications( + int index, com.google.cloud.dataproc.v1beta2.YarnApplication value) { + if (yarnApplicationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureYarnApplicationsIsMutable(); + yarnApplications_.add(index, value); + onChanged(); + } else { + yarnApplicationsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder addYarnApplications( + com.google.cloud.dataproc.v1beta2.YarnApplication.Builder builderForValue) { + if (yarnApplicationsBuilder_ == null) { + ensureYarnApplicationsIsMutable(); + yarnApplications_.add(builderForValue.build()); + onChanged(); + } else { + yarnApplicationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder addYarnApplications( + int index, com.google.cloud.dataproc.v1beta2.YarnApplication.Builder builderForValue) { + if (yarnApplicationsBuilder_ == null) { + ensureYarnApplicationsIsMutable(); + yarnApplications_.add(index, builderForValue.build()); + onChanged(); + } else { + yarnApplicationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder addAllYarnApplications( + java.lang.Iterable values) { + if (yarnApplicationsBuilder_ == null) { + ensureYarnApplicationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, yarnApplications_); + onChanged(); + } else { + yarnApplicationsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder clearYarnApplications() { + if (yarnApplicationsBuilder_ == null) { + yarnApplications_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); + onChanged(); + } else { + yarnApplicationsBuilder_.clear(); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public Builder removeYarnApplications(int index) { + if (yarnApplicationsBuilder_ == null) { + ensureYarnApplicationsIsMutable(); + yarnApplications_.remove(index); + onChanged(); + } else { + yarnApplicationsBuilder_.remove(index); + } + return this; + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplication.Builder getYarnApplicationsBuilder( + int index) { + return getYarnApplicationsFieldBuilder().getBuilder(index); + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOrBuilder( + int index) { + if (yarnApplicationsBuilder_ == null) { + return yarnApplications_.get(index); } else { + return yarnApplicationsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public java.util.List + getYarnApplicationsOrBuilderList() { + if (yarnApplicationsBuilder_ != null) { + return yarnApplicationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(yarnApplications_); + } + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplication.Builder addYarnApplicationsBuilder() { + return getYarnApplicationsFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.YarnApplication.getDefaultInstance()); + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplication.Builder addYarnApplicationsBuilder( + int index) { + return getYarnApplicationsFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.YarnApplication.getDefaultInstance()); + } + /** + *
+     * Output only. The collection of YARN applications spun up by this job.
+     * **Beta** Feature: This report is available for testing purposes only. It may
+     * be changed before final release.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + public java.util.List + getYarnApplicationsBuilderList() { + return getYarnApplicationsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.YarnApplication, com.google.cloud.dataproc.v1beta2.YarnApplication.Builder, com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder> + getYarnApplicationsFieldBuilder() { + if (yarnApplicationsBuilder_ == null) { + yarnApplicationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.YarnApplication, com.google.cloud.dataproc.v1beta2.YarnApplication.Builder, com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder>( + yarnApplications_, + ((bitField0_ & 0x00000400) == 0x00000400), + getParentForChildren(), + isClean()); + yarnApplications_ = null; + } + return yarnApplicationsBuilder_; + } + + private java.lang.Object driverOutputResourceUri_ = ""; + /** + *
+     * Output only. A URI pointing to the location of the stdout of the job's
+     * driver program.
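+     * Illustrative sketch: once the job reaches a terminal state, the URI
+     * (typically a Cloud Storage path) can simply be read back:
+     *   String uri = job.getDriverOutputResourceUri();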
+     * 
+ * + * string driver_output_resource_uri = 17; + */ + public java.lang.String getDriverOutputResourceUri() { + java.lang.Object ref = driverOutputResourceUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + driverOutputResourceUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. A URI pointing to the location of the stdout of the job's
+     * driver program.
+     * 
+ * + * string driver_output_resource_uri = 17; + */ + public com.google.protobuf.ByteString + getDriverOutputResourceUriBytes() { + java.lang.Object ref = driverOutputResourceUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + driverOutputResourceUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. A URI pointing to the location of the stdout of the job's
+     * driver program.
+     * 
+ * + * string driver_output_resource_uri = 17; + */ + public Builder setDriverOutputResourceUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + driverOutputResourceUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. A URI pointing to the location of the stdout of the job's
+     * driver program.
+     * 
+ * + * string driver_output_resource_uri = 17; + */ + public Builder clearDriverOutputResourceUri() { + + driverOutputResourceUri_ = getDefaultInstance().getDriverOutputResourceUri(); + onChanged(); + return this; + } + /** + *
+     * Output only. A URI pointing to the location of the stdout of the job's
+     * driver program.
+     * 
+ * + * string driver_output_resource_uri = 17; + */ + public Builder setDriverOutputResourceUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + driverOutputResourceUri_ = value; + onChanged(); + return this; + } + + private java.lang.Object driverControlFilesUri_ = ""; + /** + *
+     * Output only. If present, the location of miscellaneous control files
+     * which may be used as part of job setup and handling. If not present,
+     * control files may be placed in the same location as `driver_output_resource_uri`.
+     * 
+ * + * string driver_control_files_uri = 15; + */ + public java.lang.String getDriverControlFilesUri() { + java.lang.Object ref = driverControlFilesUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + driverControlFilesUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. If present, the location of miscellaneous control files
+     * which may be used as part of job setup and handling. If not present,
+     * control files may be placed in the same location as `driver_output_resource_uri`.
+     * 
+ * + * string driver_control_files_uri = 15; + */ + public com.google.protobuf.ByteString + getDriverControlFilesUriBytes() { + java.lang.Object ref = driverControlFilesUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + driverControlFilesUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. If present, the location of miscellaneous control files
+     * which may be used as part of job setup and handling. If not present,
+     * control files may be placed in the same location as `driver_output_resource_uri`.
+     * 
+ * + * string driver_control_files_uri = 15; + */ + public Builder setDriverControlFilesUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + driverControlFilesUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. If present, the location of miscellaneous control files
+     * which may be used as part of job setup and handling. If not present,
+     * control files may be placed in the same location as `driver_output_resource_uri`.
+     * 
+ * + * string driver_control_files_uri = 15; + */ + public Builder clearDriverControlFilesUri() { + + driverControlFilesUri_ = getDefaultInstance().getDriverControlFilesUri(); + onChanged(); + return this; + } + /** + *
+     * Output only. If present, the location of miscellaneous control files
+     * which may be used as part of job setup and handling. If not present,
+     * control files may be placed in the same location as `driver_output_resource_uri`.
+     * 
+ * + * string driver_control_files_uri = 15; + */ + public Builder setDriverControlFilesUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + driverControlFilesUri_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + private com.google.protobuf.MapField + internalGetMutableLabels() { + onChanged();; + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a job.
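+     * Illustrative builder sketch (label names are examples only):
+     *   Job labeled = Job.newBuilder()
+     *       .putLabels("env", "dev")
+     *       .putLabels("team", "data")
+     *       .build();
+     *   boolean tagged = labeled.containsLabels("env");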
+     * 
+ * + * map<string, string> labels = 18; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a job.
+     * 
+ * + * map<string, string> labels = 18; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a job.
+     * 
+ * + * map<string, string> labels = 18; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a job.
+     * 
+ * + * map<string, string> labels = 18; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + internalGetMutableLabels().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a job.
+     * 
+ * + * map<string, string> labels = 18; + */ + + public Builder removeLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableLabels() { + return internalGetMutableLabels().getMutableMap(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a job.
+     * 
+ * + * map<string, string> labels = 18; + */ + public Builder putLabels( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a job.
+     * 
+ * + * map<string, string> labels = 18; + */ + + public Builder putAllLabels( + java.util.Map values) { + internalGetMutableLabels().getMutableMap() + .putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1beta2.JobScheduling scheduling_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobScheduling, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder, com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder> schedulingBuilder_; + /** + *
+     * Optional. Job scheduling configuration.
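+     * Illustrative sketch; setMaxFailuresPerHour is assumed from the
+     * JobScheduling message definition:
+     *   jobBuilder.setScheduling(JobScheduling.newBuilder()
+     *       .setMaxFailuresPerHour(5)
+     *       .build());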
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public boolean hasScheduling() { + return schedulingBuilder_ != null || scheduling_ != null; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public com.google.cloud.dataproc.v1beta2.JobScheduling getScheduling() { + if (schedulingBuilder_ == null) { + return scheduling_ == null ? com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance() : scheduling_; + } else { + return schedulingBuilder_.getMessage(); + } + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public Builder setScheduling(com.google.cloud.dataproc.v1beta2.JobScheduling value) { + if (schedulingBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + scheduling_ = value; + onChanged(); + } else { + schedulingBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public Builder setScheduling( + com.google.cloud.dataproc.v1beta2.JobScheduling.Builder builderForValue) { + if (schedulingBuilder_ == null) { + scheduling_ = builderForValue.build(); + onChanged(); + } else { + schedulingBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public Builder mergeScheduling(com.google.cloud.dataproc.v1beta2.JobScheduling value) { + if (schedulingBuilder_ == null) { + if (scheduling_ != null) { + scheduling_ = + com.google.cloud.dataproc.v1beta2.JobScheduling.newBuilder(scheduling_).mergeFrom(value).buildPartial(); + } else { + scheduling_ = value; + } + onChanged(); + } else { + schedulingBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public Builder clearScheduling() { + if (schedulingBuilder_ == null) { + scheduling_ = null; + onChanged(); + } else { + scheduling_ = null; + schedulingBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public com.google.cloud.dataproc.v1beta2.JobScheduling.Builder getSchedulingBuilder() { + + onChanged(); + return getSchedulingFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + public com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder getSchedulingOrBuilder() { + if (schedulingBuilder_ != null) { + return schedulingBuilder_.getMessageOrBuilder(); + } else { + return scheduling_ == null ? + com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance() : scheduling_; + } + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobScheduling, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder, com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder> + getSchedulingFieldBuilder() { + if (schedulingBuilder_ == null) { + schedulingBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobScheduling, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder, com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder>( + getScheduling(), + getParentForChildren(), + isClean()); + scheduling_ = null; + } + return schedulingBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.Job) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.Job) + private static final com.google.cloud.dataproc.v1beta2.Job DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.Job(); + } + + public static com.google.cloud.dataproc.v1beta2.Job getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Job parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Job(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.Job getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java new file mode 100644 index 000000000000..bcdd41e1a69d --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java @@ -0,0 +1,498 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface JobOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.Job) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. The fully qualified reference to the job, which can be used to
+   * obtain the equivalent REST path of the job resource. If this property
+   * is not specified when a job is created, the server generates a
+   * <code>job_id</code>.
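+   * Illustrative sketch (assumes a Job instance named "job"):
+   *   String jobId = job.hasReference() ? job.getReference().getJobId() : "";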
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + boolean hasReference(); + /** + *
+   * Optional. The fully qualified reference to the job, which can be used to
+   * obtain the equivalent REST path of the job resource. If this property
+   * is not specified when a job is created, the server generates a
+   * <code>job_id</code>.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + com.google.cloud.dataproc.v1beta2.JobReference getReference(); + /** + *
+   * Optional. The fully qualified reference to the job, which can be used to
+   * obtain the equivalent REST path of the job resource. If this property
+   * is not specified when a job is created, the server generates a
+   * <code>job_id</code>.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobReference reference = 1; + */ + com.google.cloud.dataproc.v1beta2.JobReferenceOrBuilder getReferenceOrBuilder(); + + /** + *
+   * Required. Job information, including how, when, and where to
+   * run the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + boolean hasPlacement(); + /** + *
+   * Required. Job information, including how, when, and where to
+   * run the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + com.google.cloud.dataproc.v1beta2.JobPlacement getPlacement(); + /** + *
+   * Required. Job information, including how, when, and where to
+   * run the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobPlacement placement = 2; + */ + com.google.cloud.dataproc.v1beta2.JobPlacementOrBuilder getPlacementOrBuilder(); + + /** + *
+   * Job is a Hadoop job.
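+   * The type_job oneof can be inspected before reading (illustrative sketch;
+   * the HADOOP_JOB case name follows standard protobuf oneof generation):
+   *   if (job.getTypeJobCase() == Job.TypeJobCase.HADOOP_JOB) {
+   *     HadoopJob hadoopJob = job.getHadoopJob();
+   *   }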
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + boolean hasHadoopJob(); + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + com.google.cloud.dataproc.v1beta2.HadoopJob getHadoopJob(); + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 3; + */ + com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder getHadoopJobOrBuilder(); + + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + boolean hasSparkJob(); + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + com.google.cloud.dataproc.v1beta2.SparkJob getSparkJob(); + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 4; + */ + com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder getSparkJobOrBuilder(); + + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + boolean hasPysparkJob(); + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + com.google.cloud.dataproc.v1beta2.PySparkJob getPysparkJob(); + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 5; + */ + com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder getPysparkJobOrBuilder(); + + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + boolean hasHiveJob(); + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + com.google.cloud.dataproc.v1beta2.HiveJob getHiveJob(); + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 6; + */ + com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder getHiveJobOrBuilder(); + + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + boolean hasPigJob(); + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + com.google.cloud.dataproc.v1beta2.PigJob getPigJob(); + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 7; + */ + com.google.cloud.dataproc.v1beta2.PigJobOrBuilder getPigJobOrBuilder(); + + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + boolean hasSparkSqlJob(); + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + com.google.cloud.dataproc.v1beta2.SparkSqlJob getSparkSqlJob(); + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 12; + */ + com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder(); + + /** + *
+   * Output only. The job status. Additional application-specific
+   * status information may be contained in the <code>type_job</code>
+   * and <code>yarn_applications</code> fields.
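+   * Illustrative polling sketch for a terminal state (DONE is assumed from
+   * the JobStatus.State enum):
+   *   JobStatus.State state = job.getStatus().getState();
+   *   boolean done = (state == JobStatus.State.DONE);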
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + boolean hasStatus(); + /** + *
+   * Output only. The job status. Additional application-specific
+   * status information may be contained in the <code>type_job</code>
+   * and <code>yarn_applications</code> fields.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + com.google.cloud.dataproc.v1beta2.JobStatus getStatus(); + /** + *
+   * Output only. The job status. Additional application-specific
+   * status information may be contained in the <code>type_job</code>
+   * and <code>yarn_applications</code> fields.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus status = 8; + */ + com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusOrBuilder(); + + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + java.util.List + getStatusHistoryList(); + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + com.google.cloud.dataproc.v1beta2.JobStatus getStatusHistory(int index); + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + int getStatusHistoryCount(); + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + java.util.List + getStatusHistoryOrBuilderList(); + /** + *
+   * Output only. The previous job status.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.JobStatus status_history = 13; + */ + com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusHistoryOrBuilder( + int index); + + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + java.util.List + getYarnApplicationsList(); + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + com.google.cloud.dataproc.v1beta2.YarnApplication getYarnApplications(int index); + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + int getYarnApplicationsCount(); + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + java.util.List + getYarnApplicationsOrBuilderList(); + /** + *
+   * Output only. The collection of YARN applications spun up by this job.
+   * **Beta** Feature: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; + */ + com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOrBuilder( + int index); + + /** + *
+   * Output only. A URI pointing to the location of the stdout of the job's
+   * driver program.
+   * 
+ * + * string driver_output_resource_uri = 17; + */ + java.lang.String getDriverOutputResourceUri(); + /** + *
+   * Output only. A URI pointing to the location of the stdout of the job's
+   * driver program.
+   * 
+ * + * string driver_output_resource_uri = 17; + */ + com.google.protobuf.ByteString + getDriverOutputResourceUriBytes(); + + /** + *
+   * Output only. If present, the location of miscellaneous control files
+   * which may be used as part of job setup and handling. If not present,
+   * control files may be placed in the same location as `driver_output_resource_uri`.
+   * 
+ * + * string driver_control_files_uri = 15; + */ + java.lang.String getDriverControlFilesUri(); + /** + *
+   * Output only. If present, the location of miscellaneous control files
+   * which may be used as part of job setup and handling. If not present,
+   * control files may be placed in the same location as `driver_output_resource_uri`.
+   * 
+ * + * string driver_control_files_uri = 15; + */ + com.google.protobuf.ByteString + getDriverControlFilesUriBytes(); + + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + int getLabelsCount(); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + boolean containsLabels( + java.lang.String key); + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getLabels(); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + java.util.Map + getLabelsMap(); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + + java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a job.
+   * 
+ * + * map<string, string> labels = 18; + */ + + java.lang.String getLabelsOrThrow( + java.lang.String key); + + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + boolean hasScheduling(); + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + com.google.cloud.dataproc.v1beta2.JobScheduling getScheduling(); + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 20; + */ + com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder getSchedulingOrBuilder(); + + public com.google.cloud.dataproc.v1beta2.Job.TypeJobCase getTypeJobCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacement.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacement.java new file mode 100644 index 000000000000..3d793381e2ef --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacement.java @@ -0,0 +1,739 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Cloud Dataproc job placement configuration.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobPlacement} + */ +public final class JobPlacement extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.JobPlacement) + JobPlacementOrBuilder { +private static final long serialVersionUID = 0L; + // Use JobPlacement.newBuilder() to construct. + private JobPlacement(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private JobPlacement() { + clusterName_ = ""; + clusterUuid_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private JobPlacement( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterUuid_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobPlacement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobPlacement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobPlacement.class, com.google.cloud.dataproc.v1beta2.JobPlacement.Builder.class); + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object clusterName_; + /** + *
+   * Required. The name of the cluster where the job will be submitted.
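+   * Illustrative sketch ("my-cluster" is a placeholder name):
+   *   JobPlacement placement = JobPlacement.newBuilder()
+   *       .setClusterName("my-cluster")
+   *       .build();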
+   * 
+ * + * string cluster_name = 1; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Required. The name of the cluster where the job will be submitted.
+   * 
+ * + * string cluster_name = 1; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_UUID_FIELD_NUMBER = 2; + private volatile java.lang.Object clusterUuid_; + /** + *
+   * Output only. A cluster UUID generated by the Cloud Dataproc service when
+   * the job is submitted.
+   * 
+ * + * string cluster_uuid = 2; + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } + } + /** + *
+   * Output only. A cluster UUID generated by the Cloud Dataproc service when
+   * the job is submitted.
+   * 
+ * + * string cluster_uuid = 2; + */ + public com.google.protobuf.ByteString + getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterUuid_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, clusterName_); + } + if (!getClusterUuidBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterUuid_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.JobPlacement)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.JobPlacement other = (com.google.cloud.dataproc.v1beta2.JobPlacement) obj; + + boolean result = true; + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && getClusterUuid() + .equals(other.getClusterUuid()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + hash = (37 * hash) + CLUSTER_UUID_FIELD_NUMBER; + hash = (53 * hash) + getClusterUuid().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobPlacement parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.JobPlacement prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Cloud Dataproc job placement configuration.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobPlacement} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.JobPlacement) + com.google.cloud.dataproc.v1beta2.JobPlacementOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobPlacement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobPlacement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobPlacement.class, com.google.cloud.dataproc.v1beta2.JobPlacement.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.JobPlacement.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + clusterName_ = ""; + + clusterUuid_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobPlacement_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobPlacement getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.JobPlacement.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobPlacement build() { + com.google.cloud.dataproc.v1beta2.JobPlacement result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobPlacement buildPartial() { + com.google.cloud.dataproc.v1beta2.JobPlacement result = new com.google.cloud.dataproc.v1beta2.JobPlacement(this); + result.clusterName_ = clusterName_; + result.clusterUuid_ = clusterUuid_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + 
} + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.JobPlacement) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.JobPlacement)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.JobPlacement other) { + if (other == com.google.cloud.dataproc.v1beta2.JobPlacement.getDefaultInstance()) return this; + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (!other.getClusterUuid().isEmpty()) { + clusterUuid_ = other.clusterUuid_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.JobPlacement parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.JobPlacement) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Required. The name of the cluster where the job will be submitted.
+     * 
+ * + * string cluster_name = 1; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The name of the cluster where the job will be submitted.
+     * 
+ * + * string cluster_name = 1; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The name of the cluster where the job will be submitted.
+     * 
+ * + * string cluster_name = 1; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The name of the cluster where the job will be submitted.
+     * 
+ * + * string cluster_name = 1; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Required. The name of the cluster where the job will be submitted.
+     * 
+ * + * string cluster_name = 1; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterUuid_ = ""; + /** + *
+     * Output only. A cluster UUID generated by the Cloud Dataproc service when
+     * the job is submitted.
+     * 
+ * + * string cluster_uuid = 2; + */ + public java.lang.String getClusterUuid() { + java.lang.Object ref = clusterUuid_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterUuid_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. A cluster UUID generated by the Cloud Dataproc service when
+     * the job is submitted.
+     * 
+ * + * string cluster_uuid = 2; + */ + public com.google.protobuf.ByteString + getClusterUuidBytes() { + java.lang.Object ref = clusterUuid_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterUuid_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. A cluster UUID generated by the Cloud Dataproc service when
+     * the job is submitted.
+     * 
+ * + * string cluster_uuid = 2; + */ + public Builder setClusterUuid( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterUuid_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. A cluster UUID generated by the Cloud Dataproc service when
+     * the job is submitted.
+     * 
+ * + * string cluster_uuid = 2; + */ + public Builder clearClusterUuid() { + + clusterUuid_ = getDefaultInstance().getClusterUuid(); + onChanged(); + return this; + } + /** + *
+     * Output only. A cluster UUID generated by the Cloud Dataproc service when
+     * the job is submitted.
+     * 
+ * + * string cluster_uuid = 2; + */ + public Builder setClusterUuidBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterUuid_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.JobPlacement) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobPlacement) + private static final com.google.cloud.dataproc.v1beta2.JobPlacement DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.JobPlacement(); + } + + public static com.google.cloud.dataproc.v1beta2.JobPlacement getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public JobPlacement parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new JobPlacement(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobPlacement getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacementOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacementOrBuilder.java new file mode 100644 index 000000000000..6fe3e63a9b54 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacementOrBuilder.java @@ -0,0 +1,47 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface JobPlacementOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.JobPlacement) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The name of the cluster where the job will be submitted.
+   * 
+ * + * string cluster_name = 1; + */ + java.lang.String getClusterName(); + /** + *
+   * Required. The name of the cluster where the job will be submitted.
+   * 
+ * + * string cluster_name = 1; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Output only. A cluster UUID generated by the Cloud Dataproc service when
+   * the job is submitted.
+   * 
+ * + * string cluster_uuid = 2; + */ + java.lang.String getClusterUuid(); + /** + *
+   * Output only. A cluster UUID generated by the Cloud Dataproc service when
+   * the job is submitted.
+   * 
+ * + * string cluster_uuid = 2; + */ + com.google.protobuf.ByteString + getClusterUuidBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReference.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReference.java new file mode 100644 index 000000000000..a1cb807f00f3 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReference.java @@ -0,0 +1,767 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Encapsulates the full scoping used to reference a job.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobReference} + */ +public final class JobReference extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.JobReference) + JobReferenceOrBuilder { +private static final long serialVersionUID = 0L; + // Use JobReference.newBuilder() to construct. + private JobReference(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private JobReference() { + projectId_ = ""; + jobId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private JobReference( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + jobId_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobReference_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobReference_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobReference.class, com.google.cloud.dataproc.v1beta2.JobReference.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int JOB_ID_FIELD_NUMBER = 2; + private volatile java.lang.Object jobId_; + /** + *
+   * Optional. The job ID, which must be unique within the project. The job ID
+   * is generated by the server upon job submission or provided by the user as a
+   * means to perform retries without creating duplicate jobs. The ID must
+   * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+   * hyphens (-). The maximum length is 100 characters.
+   * 
+ * + * string job_id = 2; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } + } + /** + *
+   * Optional. The job ID, which must be unique within the project. The job ID
+   * is generated by the server upon job submission or provided by the user as a
+   * means to perform retries without creating duplicate jobs. The ID must
+   * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+   * hyphens (-). The maximum length is 100 characters.
+   * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, jobId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getJobIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, jobId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.JobReference)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.JobReference other = (com.google.cloud.dataproc.v1beta2.JobReference) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getJobId() + .equals(other.getJobId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public 
static com.google.cloud.dataproc.v1beta2.JobReference parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobReference parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.JobReference prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Encapsulates the full scoping used to reference a job.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobReference} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.JobReference) + com.google.cloud.dataproc.v1beta2.JobReferenceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobReference_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobReference_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobReference.class, com.google.cloud.dataproc.v1beta2.JobReference.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.JobReference.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + jobId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobReference_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobReference getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.JobReference.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobReference build() { + com.google.cloud.dataproc.v1beta2.JobReference result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobReference buildPartial() { + com.google.cloud.dataproc.v1beta2.JobReference result = new com.google.cloud.dataproc.v1beta2.JobReference(this); + result.projectId_ = projectId_; + result.jobId_ = jobId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override 
+ public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.JobReference) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.JobReference)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.JobReference other) { + if (other == com.google.cloud.dataproc.v1beta2.JobReference.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getJobId().isEmpty()) { + jobId_ = other.jobId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.JobReference parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.JobReference) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object jobId_ = ""; + /** + *
+     * Optional. The job ID, which must be unique within the project. The job ID
+     * is generated by the server upon job submission or provided by the user as a
+     * means to perform retries without creating duplicate jobs. The ID must
+     * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+     * hyphens (-). The maximum length is 100 characters.
+     * 
+ * + * string job_id = 2; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The job ID, which must be unique within the project. The job ID
+     * is generated by the server upon job submission or provided by the user as a
+     * means to perform retries without creating duplicate jobs. The ID must
+     * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+     * hyphens (-). The maximum length is 100 characters.
+     * 
+ * + * string job_id = 2; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The job ID, which must be unique within the project. The job ID
+     * is generated by the server upon job submission or provided by the user as a
+     * means to perform retries without creating duplicate jobs. The ID must
+     * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+     * hyphens (-). The maximum length is 100 characters.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + jobId_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The job ID, which must be unique within the project. The job ID
+     * is generated by the server upon job submission or provided by the user as a
+     * means to perform retries without creating duplicate jobs. The ID must
+     * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+     * hyphens (-). The maximum length is 100 characters.
+     * 
+ * + * string job_id = 2; + */ + public Builder clearJobId() { + + jobId_ = getDefaultInstance().getJobId(); + onChanged(); + return this; + } + /** + *
+     * Optional. The job ID, which must be unique within the project. The job ID
+     * is generated by the server upon job submission or provided by the user as a
+     * means to perform retries without creating duplicate jobs. The ID must
+     * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+     * hyphens (-). The maximum length is 100 characters.
+     * 
+ * + * string job_id = 2; + */ + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + jobId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.JobReference) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobReference) + private static final com.google.cloud.dataproc.v1beta2.JobReference DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.JobReference(); + } + + public static com.google.cloud.dataproc.v1beta2.JobReference getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public JobReference parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new JobReference(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobReference getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReferenceOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReferenceOrBuilder.java new file mode 100644 index 000000000000..6c305225b0ce --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReferenceOrBuilder.java @@ -0,0 +1,55 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface JobReferenceOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.JobReference) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Optional. The job ID, which must be unique within the project. The job ID
+   * is generated by the server upon job submission or provided by the user as a
+   * means to perform retries without creating duplicate jobs. The ID must
+   * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+   * hyphens (-). The maximum length is 100 characters.
+   * 
+ * + * string job_id = 2; + */ + java.lang.String getJobId(); + /** + *
+   * Optional. The job ID, which must be unique within the project. The job ID
+   * is generated by the server upon job submission or provided by the user as a
+   * means to perform retries without creating duplicate jobs. The ID must
+   * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
+   * hyphens (-). The maximum length is 100 characters.
+   * 
+ * + * string job_id = 2; + */ + com.google.protobuf.ByteString + getJobIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobScheduling.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobScheduling.java new file mode 100644 index 000000000000..17ac3bc210ce --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobScheduling.java @@ -0,0 +1,516 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Job scheduling options.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobScheduling} + */ +public final class JobScheduling extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.JobScheduling) + JobSchedulingOrBuilder { +private static final long serialVersionUID = 0L; + // Use JobScheduling.newBuilder() to construct. + private JobScheduling(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private JobScheduling() { + maxFailuresPerHour_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private JobScheduling( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + maxFailuresPerHour_ = input.readInt32(); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobScheduling_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobScheduling_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobScheduling.class, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder.class); + } + + public static final int MAX_FAILURES_PER_HOUR_FIELD_NUMBER = 1; + private int maxFailuresPerHour_; + /** + *
+   * Optional. Maximum number of times per hour a driver may be restarted as
+   * a result of the driver terminating with a non-zero code before the job
+   * is reported failed.
+   * A job may be reported as thrashing if the driver exits with a non-zero
+   * code 4 times within a 10-minute window.
+   * Maximum value is 10.
+   * 
+ * + * int32 max_failures_per_hour = 1; + */ + public int getMaxFailuresPerHour() { + return maxFailuresPerHour_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (maxFailuresPerHour_ != 0) { + output.writeInt32(1, maxFailuresPerHour_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (maxFailuresPerHour_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, maxFailuresPerHour_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.JobScheduling)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.JobScheduling other = (com.google.cloud.dataproc.v1beta2.JobScheduling) obj; + + boolean result = true; + result = result && (getMaxFailuresPerHour() + == other.getMaxFailuresPerHour()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MAX_FAILURES_PER_HOUR_FIELD_NUMBER; + hash = (53 * hash) + getMaxFailuresPerHour(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobScheduling parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.JobScheduling prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Job scheduling options.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobScheduling} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.JobScheduling) + com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobScheduling_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobScheduling_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobScheduling.class, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.JobScheduling.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + maxFailuresPerHour_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobScheduling_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobScheduling getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobScheduling build() { + com.google.cloud.dataproc.v1beta2.JobScheduling result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobScheduling buildPartial() { + com.google.cloud.dataproc.v1beta2.JobScheduling result = new com.google.cloud.dataproc.v1beta2.JobScheduling(this); + result.maxFailuresPerHour_ = maxFailuresPerHour_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + 
public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.JobScheduling) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.JobScheduling)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.JobScheduling other) { + if (other == com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance()) return this; + if (other.getMaxFailuresPerHour() != 0) { + setMaxFailuresPerHour(other.getMaxFailuresPerHour()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.JobScheduling parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.JobScheduling) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int maxFailuresPerHour_ ; + /** + *
+     * Optional. Maximum number of times per hour a driver may be restarted as
+     * a result of the driver terminating with a non-zero code before the job
+     * is reported failed.
+     * A job may be reported as thrashing if the driver exits with a non-zero
+     * code 4 times within a 10-minute window.
+     * Maximum value is 10.
+     * 
+ * + * int32 max_failures_per_hour = 1; + */ + public int getMaxFailuresPerHour() { + return maxFailuresPerHour_; + } + /** + *
+     * Optional. Maximum number of times per hour a driver may be restarted as
+     * a result of the driver terminating with a non-zero code before the job
+     * is reported failed.
+     * A job may be reported as thrashing if the driver exits with a non-zero
+     * code 4 times within a 10-minute window.
+     * Maximum value is 10.
+     * 
+ * + * int32 max_failures_per_hour = 1; + */ + public Builder setMaxFailuresPerHour(int value) { + + maxFailuresPerHour_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Maximum number of times per hour a driver may be restarted as
+     * a result of the driver terminating with a non-zero code before the job
+     * is reported failed.
+     * A job may be reported as thrashing if the driver exits with a non-zero
+     * code 4 times within a 10-minute window.
+     * Maximum value is 10.
+     * 
+ * + * int32 max_failures_per_hour = 1; + */ + public Builder clearMaxFailuresPerHour() { + + maxFailuresPerHour_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.JobScheduling) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobScheduling) + private static final com.google.cloud.dataproc.v1beta2.JobScheduling DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.JobScheduling(); + } + + public static com.google.cloud.dataproc.v1beta2.JobScheduling getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public JobScheduling parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new JobScheduling(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobScheduling getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobSchedulingOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobSchedulingOrBuilder.java new file mode 100644 index 000000000000..4bc8abdbe4b2 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobSchedulingOrBuilder.java @@ -0,0 +1,23 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface JobSchedulingOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.JobScheduling) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. Maximum number of times per hour a driver may be restarted as
+   * a result of the driver terminating with a non-zero code before the job
+   * is reported failed.
+   * A job may be reported as thrashing if the driver exits with a non-zero
+   * code 4 times within a 10-minute window.
+   * Maximum value is 10.
+   * 
+ * + * int32 max_failures_per_hour = 1; + */ + int getMaxFailuresPerHour(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatus.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatus.java new file mode 100644 index 000000000000..d509b2654847 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatus.java @@ -0,0 +1,1474 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Cloud Dataproc job status.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobStatus} + */ +public final class JobStatus extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.JobStatus) + JobStatusOrBuilder { +private static final long serialVersionUID = 0L; + // Use JobStatus.newBuilder() to construct. + private JobStatus(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private JobStatus() { + state_ = 0; + details_ = ""; + substate_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private JobStatus( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + details_ = s; + break; + } + case 50: { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (stateStartTime_ != null) { + subBuilder = stateStartTime_.toBuilder(); + } + stateStartTime_ = input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stateStartTime_); + stateStartTime_ = subBuilder.buildPartial(); + } + + break; + } + case 56: { + int rawValue = input.readEnum(); + + substate_ = rawValue; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobStatus.class, com.google.cloud.dataproc.v1beta2.JobStatus.Builder.class); + } + + /** + *
+   * The job state.
+   * 
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.JobStatus.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * The job state is unknown.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + *
+     * The job is pending; it has been submitted, but is not yet running.
+     * 
+ * + * PENDING = 1; + */ + PENDING(1), + /** + *
+     * The job has been received by the service and completed initial setup;
+     * it will soon be submitted to the cluster.
+     * 
+ * + * SETUP_DONE = 8; + */ + SETUP_DONE(8), + /** + *
+     * The job is running on the cluster.
+     * 
+ * + * RUNNING = 2; + */ + RUNNING(2), + /** + *
+     * A CancelJob request has been received, but the cancellation is pending.
+     * 
+ * + * CANCEL_PENDING = 3; + */ + CANCEL_PENDING(3), + /** + *
+     * Transient in-flight resources have been canceled, and the request to
+     * cancel the running job has been issued to the cluster.
+     * 
+ * + * CANCEL_STARTED = 7; + */ + CANCEL_STARTED(7), + /** + *
+     * The job cancellation was successful.
+     * 
+ * + * CANCELLED = 4; + */ + CANCELLED(4), + /** + *
+     * The job has completed successfully.
+     * 
+ * + * DONE = 5; + */ + DONE(5), + /** + *
+     * The job has completed, but encountered an error.
+     * 
+ * + * ERROR = 6; + */ + ERROR(6), + /** + *
+     * The job attempt has failed. The <code>details</code> field contains
+     * failure details for this attempt.
+     * Applies to restartable jobs only.
+     * 
+ * + * ATTEMPT_FAILURE = 9; + */ + ATTEMPT_FAILURE(9), + UNRECOGNIZED(-1), + ; + + /** + *
+     * The job state is unknown.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + /** + *
+     * The job is pending; it has been submitted, but is not yet running.
+     * 
+ * + * PENDING = 1; + */ + public static final int PENDING_VALUE = 1; + /** + *
+     * The job has been received by the service and completed initial setup;
+     * it will soon be submitted to the cluster.
+     * 
+ * + * SETUP_DONE = 8; + */ + public static final int SETUP_DONE_VALUE = 8; + /** + *
+     * The job is running on the cluster.
+     * 
+ * + * RUNNING = 2; + */ + public static final int RUNNING_VALUE = 2; + /** + *
+     * A CancelJob request has been received, but the cancellation is pending.
+     * 
+ * + * CANCEL_PENDING = 3; + */ + public static final int CANCEL_PENDING_VALUE = 3; + /** + *
+     * Transient in-flight resources have been canceled, and the request to
+     * cancel the running job has been issued to the cluster.
+     * 
+ * + * CANCEL_STARTED = 7; + */ + public static final int CANCEL_STARTED_VALUE = 7; + /** + *
+     * The job cancellation was successful.
+     * 
+ * + * CANCELLED = 4; + */ + public static final int CANCELLED_VALUE = 4; + /** + *
+     * The job has completed successfully.
+     * 
+ * + * DONE = 5; + */ + public static final int DONE_VALUE = 5; + /** + *
+     * The job has completed, but encountered an error.
+     * 
+ * + * ERROR = 6; + */ + public static final int ERROR_VALUE = 6; + /** + *
+     * The job attempt has failed. The <code>details</code> field contains
+     * failure details for this attempt.
+     * Applies to restartable jobs only.
+     * 
+ * + * ATTEMPT_FAILURE = 9; + */ + public static final int ATTEMPT_FAILURE_VALUE = 9; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + public static State forNumber(int value) { + switch (value) { + case 0: return STATE_UNSPECIFIED; + case 1: return PENDING; + case 8: return SETUP_DONE; + case 2: return RUNNING; + case 3: return CANCEL_PENDING; + case 7: return CANCEL_STARTED; + case 4: return CANCELLED; + case 5: return DONE; + case 6: return ERROR; + case 9: return ATTEMPT_FAILURE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + State> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobStatus.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.JobStatus.State) + } + + /** + *
+   * The job substate.
+   * 
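+   *
+   * Each non-default substate applies only while the job is in the
+   * {@code RUNNING} state; a sketch of surfacing a queued job to users
+   * (illustrative only):
+   * <pre>{@code
+   * if (status.getState() == JobStatus.State.RUNNING
+   *     && status.getSubstate() == JobStatus.Substate.QUEUED) {
+   *   System.out.println("Job queued: " + status.getDetails());
+   * }
+   * }</pre>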
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.JobStatus.Substate} + */ + public enum Substate + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * The job substate is unknown.
+     * 
+ * + * UNSPECIFIED = 0; + */ + UNSPECIFIED(0), + /** + *
+     * The job is submitted to the agent.
+     * Applies to RUNNING state.
+     * 
+ * + * SUBMITTED = 1; + */ + SUBMITTED(1), + /** + *
+     * The job has been received and is awaiting execution (it may be waiting
+     * for a condition to be met). See the <code>details</code> field for the
+     * reason for the delay.
+     * Applies to RUNNING state.
+     * 
+ * + * QUEUED = 2; + */ + QUEUED(2), + /** + *
+     * The agent-reported status is out of date, which may be caused by a
+     * loss of communication between the agent and Cloud Dataproc. If the
+     * agent does not send a timely update, the job will fail.
+     * Applies to RUNNING state.
+     * 
+ * + * STALE_STATUS = 3; + */ + STALE_STATUS(3), + UNRECOGNIZED(-1), + ; + + /** + *
+     * The job substate is unknown.
+     * 
+ * + * UNSPECIFIED = 0; + */ + public static final int UNSPECIFIED_VALUE = 0; + /** + *
+     * The job is submitted to the agent.
+     * Applies to RUNNING state.
+     * 
+ * + * SUBMITTED = 1; + */ + public static final int SUBMITTED_VALUE = 1; + /** + *
+     * The job has been received and is awaiting execution (it may be waiting
+     * for a condition to be met). See the <code>details</code> field for the
+     * reason for the delay.
+     * Applies to RUNNING state.
+     * 
+ * + * QUEUED = 2; + */ + public static final int QUEUED_VALUE = 2; + /** + *
+     * The agent-reported status is out of date, which may be caused by a
+     * loss of communication between the agent and Cloud Dataproc. If the
+     * agent does not send a timely update, the job will fail.
+     * Applies to RUNNING state.
+     * 
+ * + * STALE_STATUS = 3; + */ + public static final int STALE_STATUS_VALUE = 3; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Substate valueOf(int value) { + return forNumber(value); + } + + public static Substate forNumber(int value) { + switch (value) { + case 0: return UNSPECIFIED; + case 1: return SUBMITTED; + case 2: return QUEUED; + case 3: return STALE_STATUS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + Substate> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Substate findValueByNumber(int number) { + return Substate.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobStatus.getDescriptor().getEnumTypes().get(1); + } + + private static final Substate[] VALUES = values(); + + public static Substate valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Substate(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.JobStatus.Substate) + } + + public static final int STATE_FIELD_NUMBER = 1; + private int state_; + /** + *
+   * Output only. A state message specifying the overall job state.
+   * 
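+   *
+   * Note that this accessor returns the raw wire number even when the value
+   * is unknown to this client version, whereas {@code getState()} maps such
+   * values to {@code State.UNRECOGNIZED}; a sketch (illustrative only):
+   * <pre>{@code
+   * if (status.getState() == JobStatus.State.UNRECOGNIZED) {
+   *   System.out.println("Unknown state number: " + status.getStateValue());
+   * }
+   * }</pre>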
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + public int getStateValue() { + return state_; + } + /** + *
+   * Output only. A state message specifying the overall job state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.JobStatus.State result = com.google.cloud.dataproc.v1beta2.JobStatus.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.JobStatus.State.UNRECOGNIZED : result; + } + + public static final int DETAILS_FIELD_NUMBER = 2; + private volatile java.lang.Object details_; + /** + *
+   * Output only. Optional job state details, such as an error
+   * description if the state is <code>ERROR</code>.
+   * 
+ * + * string details = 2; + */ + public java.lang.String getDetails() { + java.lang.Object ref = details_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + details_ = s; + return s; + } + } + /** + *
+   * Output only. Optional job state details, such as an error
+   * description if the state is <code>ERROR</code>.
+   * 
+ * + * string details = 2; + */ + public com.google.protobuf.ByteString + getDetailsBytes() { + java.lang.Object ref = details_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + details_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_START_TIME_FIELD_NUMBER = 6; + private com.google.protobuf.Timestamp stateStartTime_; + /** + *
+   * Output only. The time when this state was entered.
+   * 
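+   *
+   * The value is a {@code google.protobuf.Timestamp}; converting it to
+   * {@code java.time} (illustrative sketch only):
+   * <pre>{@code
+   * if (status.hasStateStartTime()) {
+   *   com.google.protobuf.Timestamp ts = status.getStateStartTime();
+   *   java.time.Instant entered =
+   *       java.time.Instant.ofEpochSecond(ts.getSeconds(), ts.getNanos());
+   * }
+   * }</pre>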
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public boolean hasStateStartTime() { + return stateStartTime_ != null; + } + /** + *
+   * Output only. The time when this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public com.google.protobuf.Timestamp getStateStartTime() { + return stateStartTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_; + } + /** + *
+   * Output only. The time when this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() { + return getStateStartTime(); + } + + public static final int SUBSTATE_FIELD_NUMBER = 7; + private int substate_; + /** + *
+   * Output only. Additional state information, which includes
+   * status reported by the agent.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + public int getSubstateValue() { + return substate_; + } + /** + *
+   * Output only. Additional state information, which includes
+   * status reported by the agent.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.Substate getSubstate() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.JobStatus.Substate result = com.google.cloud.dataproc.v1beta2.JobStatus.Substate.valueOf(substate_); + return result == null ? com.google.cloud.dataproc.v1beta2.JobStatus.Substate.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (state_ != com.google.cloud.dataproc.v1beta2.JobStatus.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, state_); + } + if (!getDetailsBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, details_); + } + if (stateStartTime_ != null) { + output.writeMessage(6, getStateStartTime()); + } + if (substate_ != com.google.cloud.dataproc.v1beta2.JobStatus.Substate.UNSPECIFIED.getNumber()) { + output.writeEnum(7, substate_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (state_ != com.google.cloud.dataproc.v1beta2.JobStatus.State.STATE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_); + } + if (!getDetailsBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, details_); + } + if (stateStartTime_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, getStateStartTime()); + } + if (substate_ != com.google.cloud.dataproc.v1beta2.JobStatus.Substate.UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(7, substate_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.JobStatus)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.JobStatus other = (com.google.cloud.dataproc.v1beta2.JobStatus) obj; + + boolean result = true; + result = result && state_ == other.state_; + result = result && getDetails() + .equals(other.getDetails()); + result = result && (hasStateStartTime() == other.hasStateStartTime()); + if (hasStateStartTime()) { + result = result && getStateStartTime() + .equals(other.getStateStartTime()); + } + result = result && substate_ == other.substate_; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + DETAILS_FIELD_NUMBER; + hash = (53 * hash) + getDetails().hashCode(); + if (hasStateStartTime()) { + hash = (37 * hash) + STATE_START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStateStartTime().hashCode(); + } + hash = (37 * hash) + SUBSTATE_FIELD_NUMBER; + hash = (53 * hash) + substate_; + hash = (29 
* hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.JobStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.JobStatus prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Cloud Dataproc job status.
+   * 
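+   *
+   * A builder round-trip sketch (illustrative only; the substate chosen is
+   * hypothetical):
+   * <pre>{@code
+   * JobStatus updated = original.toBuilder()
+   *     .setSubstate(JobStatus.Substate.QUEUED)
+   *     .build();
+   * }</pre>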
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.JobStatus} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.JobStatus) + com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobStatus_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.JobStatus.class, com.google.cloud.dataproc.v1beta2.JobStatus.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.JobStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + state_ = 0; + + details_ = ""; + + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = null; + } else { + stateStartTime_ = null; + stateStartTimeBuilder_ = null; + } + substate_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_JobStatus_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobStatus getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.JobStatus.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobStatus build() { + com.google.cloud.dataproc.v1beta2.JobStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobStatus buildPartial() { + com.google.cloud.dataproc.v1beta2.JobStatus result = new com.google.cloud.dataproc.v1beta2.JobStatus(this); + result.state_ = state_; + result.details_ = details_; + if (stateStartTimeBuilder_ == null) { + result.stateStartTime_ = stateStartTime_; + } else { + result.stateStartTime_ = stateStartTimeBuilder_.build(); + } + result.substate_ = substate_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { 
+ return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.JobStatus) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.JobStatus)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.JobStatus other) { + if (other == com.google.cloud.dataproc.v1beta2.JobStatus.getDefaultInstance()) return this; + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.getDetails().isEmpty()) { + details_ = other.details_; + onChanged(); + } + if (other.hasStateStartTime()) { + mergeStateStartTime(other.getStateStartTime()); + } + if (other.substate_ != 0) { + setSubstateValue(other.getSubstateValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.JobStatus parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.JobStatus) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int state_ = 0; + /** + *
+     * Output only. A state message specifying the overall job state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + public int getStateValue() { + return state_; + } + /** + *
+     * Output only. A state message specifying the overall job state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + public Builder setStateValue(int value) { + state_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. A state message specifying the overall job state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.JobStatus.State result = com.google.cloud.dataproc.v1beta2.JobStatus.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.JobStatus.State.UNRECOGNIZED : result; + } + /** + *
+     * Output only. A state message specifying the overall job state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + public Builder setState(com.google.cloud.dataproc.v1beta2.JobStatus.State value) { + if (value == null) { + throw new NullPointerException(); + } + + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Output only. A state message specifying the overall job state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + public Builder clearState() { + + state_ = 0; + onChanged(); + return this; + } + + private java.lang.Object details_ = ""; + /** + *
+     * Output only. Optional job state details, such as an error
+     * description if the state is <code>ERROR</code>.
+     * 
+ * + * string details = 2; + */ + public java.lang.String getDetails() { + java.lang.Object ref = details_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + details_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. Optional job state details, such as an error
+     * description if the state is <code>ERROR</code>.
+     * 
+ * + * string details = 2; + */ + public com.google.protobuf.ByteString + getDetailsBytes() { + java.lang.Object ref = details_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + details_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. Optional job state details, such as an error
+     * description if the state is <code>ERROR</code>.
+     * 
+ * + * string details = 2; + */ + public Builder setDetails( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + details_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Optional job state details, such as an error
+     * description if the state is <code>ERROR</code>.
+     * 
+ * + * string details = 2; + */ + public Builder clearDetails() { + + details_ = getDefaultInstance().getDetails(); + onChanged(); + return this; + } + /** + *
+     * Output only. Optional job state details, such as an error
+     * description if the state is <code>ERROR</code>.
+     * 
+ * + * string details = 2; + */ + public Builder setDetailsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + details_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp stateStartTime_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> stateStartTimeBuilder_; + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public boolean hasStateStartTime() { + return stateStartTimeBuilder_ != null || stateStartTime_ != null; + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public com.google.protobuf.Timestamp getStateStartTime() { + if (stateStartTimeBuilder_ == null) { + return stateStartTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_; + } else { + return stateStartTimeBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public Builder setStateStartTime(com.google.protobuf.Timestamp value) { + if (stateStartTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stateStartTime_ = value; + onChanged(); + } else { + stateStartTimeBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public Builder setStateStartTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = builderForValue.build(); + onChanged(); + } else { + stateStartTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public Builder mergeStateStartTime(com.google.protobuf.Timestamp value) { + if (stateStartTimeBuilder_ == null) { + if (stateStartTime_ != null) { + stateStartTime_ = + com.google.protobuf.Timestamp.newBuilder(stateStartTime_).mergeFrom(value).buildPartial(); + } else { + stateStartTime_ = value; + } + onChanged(); + } else { + stateStartTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public Builder clearStateStartTime() { + if (stateStartTimeBuilder_ == null) { + stateStartTime_ = null; + onChanged(); + } else { + stateStartTime_ = null; + stateStartTimeBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public com.google.protobuf.Timestamp.Builder getStateStartTimeBuilder() { + + onChanged(); + return getStateStartTimeFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() { + if (stateStartTimeBuilder_ != null) { + return stateStartTimeBuilder_.getMessageOrBuilder(); + } else { + return stateStartTime_ == null ? + com.google.protobuf.Timestamp.getDefaultInstance() : stateStartTime_; + } + } + /** + *
+     * Output only. The time when this state was entered.
+     * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> + getStateStartTimeFieldBuilder() { + if (stateStartTimeBuilder_ == null) { + stateStartTimeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( + getStateStartTime(), + getParentForChildren(), + isClean()); + stateStartTime_ = null; + } + return stateStartTimeBuilder_; + } + + private int substate_ = 0; + /** + *
+     * Output only. Additional state information, which includes
+     * status reported by the agent.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + public int getSubstateValue() { + return substate_; + } + /** + *
+     * Output only. Additional state information, which includes
+     * status reported by the agent.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + public Builder setSubstateValue(int value) { + substate_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. Additional state information, which includes
+     * status reported by the agent.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + public com.google.cloud.dataproc.v1beta2.JobStatus.Substate getSubstate() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.JobStatus.Substate result = com.google.cloud.dataproc.v1beta2.JobStatus.Substate.valueOf(substate_); + return result == null ? com.google.cloud.dataproc.v1beta2.JobStatus.Substate.UNRECOGNIZED : result; + } + /** + *
+     * Output only. Additional state information, which includes
+     * status reported by the agent.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + public Builder setSubstate(com.google.cloud.dataproc.v1beta2.JobStatus.Substate value) { + if (value == null) { + throw new NullPointerException(); + } + + substate_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Output only. Additional state information, which includes
+     * status reported by the agent.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + public Builder clearSubstate() { + + substate_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.JobStatus) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobStatus) + private static final com.google.cloud.dataproc.v1beta2.JobStatus DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.JobStatus(); + } + + public static com.google.cloud.dataproc.v1beta2.JobStatus getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public JobStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new JobStatus(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.JobStatus getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatusOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatusOrBuilder.java new file mode 100644 index 000000000000..f5bbfa7ba6c5 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatusOrBuilder.java @@ -0,0 +1,90 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface JobStatusOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.JobStatus) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. A state message specifying the overall job state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + int getStateValue(); + /** + *
+   * Output only. A state message specifying the overall job state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.State state = 1; + */ + com.google.cloud.dataproc.v1beta2.JobStatus.State getState(); + + /** + *
+   * Output only. Optional job state details, such as an error
+   * description if the state is <code>ERROR</code>.
+   * 
+ * + * string details = 2; + */ + java.lang.String getDetails(); + /** + *
+   * Output only. Optional job state details, such as an error
+   * description if the state is <code>ERROR</code>.
+   * 
+ * + * string details = 2; + */ + com.google.protobuf.ByteString + getDetailsBytes(); + + /** + *
+   * Output only. The time when this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + boolean hasStateStartTime(); + /** + *
+   * Output only. The time when this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + com.google.protobuf.Timestamp getStateStartTime(); + /** + *
+   * Output only. The time when this state was entered.
+   * 
+ * + * .google.protobuf.Timestamp state_start_time = 6; + */ + com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder(); + + /** + *
+   * Output only. Additional state information, which includes
+   * status reported by the agent.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + int getSubstateValue(); + /** + *
+   * Output only. Additional state information, which includes
+   * status reported by the agent.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobStatus.Substate substate = 7; + */ + com.google.cloud.dataproc.v1beta2.JobStatus.Substate getSubstate(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobsProto.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobsProto.java new file mode 100644 index 000000000000..a165a4fd8069 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobsProto.java @@ -0,0 +1,582 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public final class JobsProto { + private JobsProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_DriverLogLevelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_DriverLogLevelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_PropertiesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SparkJob_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SparkJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SparkJob_PropertiesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SparkJob_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_PropertiesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_PropertiesEntry_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_QueryList_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_QueryList_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_HiveJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_HiveJob_ScriptVariablesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_HiveJob_ScriptVariablesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_HiveJob_PropertiesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_HiveJob_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_ScriptVariablesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_ScriptVariablesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_PropertiesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_PigJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_PigJob_ScriptVariablesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_PigJob_ScriptVariablesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_PigJob_PropertiesEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_PigJob_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_JobPlacement_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_JobPlacement_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_JobStatus_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_dataproc_v1beta2_JobStatus_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_JobReference_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_JobReference_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_YarnApplication_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_YarnApplication_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_Job_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_Job_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_Job_LabelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_Job_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_JobScheduling_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_JobScheduling_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n(google/cloud/dataproc/v1beta2/jobs.pro" + + "to\022\035google.cloud.dataproc.v1beta2\032\034googl" + + "e/api/annotations.proto\032\033google/protobuf" + + "/empty.proto\032 google/protobuf/field_mask" + + ".proto\032\037google/protobuf/timestamp.proto\"" + + "\313\002\n\rLoggingConfig\022\\\n\021driver_log_levels\030\002" + + " \003(\0132A.google.cloud.dataproc.v1beta2.Log" + + "gingConfig.DriverLogLevelsEntry\032j\n\024Drive" + + "rLogLevelsEntry\022\013\n\003key\030\001 \001(\t\022A\n\005value\030\002 " + + "\001(\01622.google.cloud.dataproc.v1beta2.Logg" + + "ingConfig.Level:\0028\001\"p\n\005Level\022\025\n\021LEVEL_UN" + + "SPECIFIED\020\000\022\007\n\003ALL\020\001\022\t\n\005TRACE\020\002\022\t\n\005DEBUG" + + "\020\003\022\010\n\004INFO\020\004\022\010\n\004WARN\020\005\022\t\n\005ERROR\020\006\022\t\n\005FAT" + + "AL\020\007\022\007\n\003OFF\020\010\"\335\002\n\tHadoopJob\022\033\n\021main_jar_" + + "file_uri\030\001 \001(\tH\000\022\024\n\nmain_class\030\002 \001(\tH\000\022\014" + + "\n\004args\030\003 \003(\t\022\025\n\rjar_file_uris\030\004 \003(\t\022\021\n\tf" + + "ile_uris\030\005 \003(\t\022\024\n\014archive_uris\030\006 \003(\t\022L\n\n" + + "properties\030\007 \003(\01328.google.cloud.dataproc" + + ".v1beta2.HadoopJob.PropertiesEntry\022D\n\016lo" + + "gging_config\030\010 \001(\0132,.google.cloud.datapr" + + "oc.v1beta2.LoggingConfig\0321\n\017PropertiesEn" + + "try\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\010\n\006d" + + "river\"\333\002\n\010SparkJob\022\033\n\021main_jar_file_uri\030" + + "\001 \001(\tH\000\022\024\n\nmain_class\030\002 \001(\tH\000\022\014\n\004args\030\003 " + + "\003(\t\022\025\n\rjar_file_uris\030\004 \003(\t\022\021\n\tfile_uris\030" + + "\005 \003(\t\022\024\n\014archive_uris\030\006 \003(\t\022K\n\npropertie" + + "s\030\007 \003(\01327.google.cloud.dataproc.v1beta2." + + "SparkJob.PropertiesEntry\022D\n\016logging_conf" + + "ig\030\010 \001(\0132,.google.cloud.dataproc.v1beta2" + + ".LoggingConfig\0321\n\017PropertiesEntry\022\013\n\003key" + + "\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\010\n\006driver\"\332\002\n\n" + + "PySparkJob\022\034\n\024main_python_file_uri\030\001 \001(\t" + + "\022\014\n\004args\030\002 \003(\t\022\030\n\020python_file_uris\030\003 \003(\t" + + "\022\025\n\rjar_file_uris\030\004 \003(\t\022\021\n\tfile_uris\030\005 \003" + + "(\t\022\024\n\014archive_uris\030\006 \003(\t\022M\n\nproperties\030\007" + + " \003(\01329.google.cloud.dataproc.v1beta2.PyS" + + "parkJob.PropertiesEntry\022D\n\016logging_confi" + + "g\030\010 \001(\0132,.google.cloud.dataproc.v1beta2." 
+ + "LoggingConfig\0321\n\017PropertiesEntry\022\013\n\003key\030" + + "\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\034\n\tQueryList\022\017\n" + + "\007queries\030\001 \003(\t\"\260\003\n\007HiveJob\022\030\n\016query_file" + + "_uri\030\001 \001(\tH\000\022>\n\nquery_list\030\002 \001(\0132(.googl" + + "e.cloud.dataproc.v1beta2.QueryListH\000\022\033\n\023" + + "continue_on_failure\030\003 \001(\010\022U\n\020script_vari" + + "ables\030\004 \003(\0132;.google.cloud.dataproc.v1be" + + "ta2.HiveJob.ScriptVariablesEntry\022J\n\nprop" + + "erties\030\005 \003(\01326.google.cloud.dataproc.v1b" + + "eta2.HiveJob.PropertiesEntry\022\025\n\rjar_file" + + "_uris\030\006 \003(\t\0326\n\024ScriptVariablesEntry\022\013\n\003k" + + "ey\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017Propertie" + + "sEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\t" + + "\n\007queries\"\345\003\n\013SparkSqlJob\022\030\n\016query_file_" + + "uri\030\001 \001(\tH\000\022>\n\nquery_list\030\002 \001(\0132(.google" + + ".cloud.dataproc.v1beta2.QueryListH\000\022Y\n\020s" + + "cript_variables\030\003 \003(\0132?.google.cloud.dat" + + "aproc.v1beta2.SparkSqlJob.ScriptVariable" + + "sEntry\022N\n\nproperties\030\004 \003(\0132:.google.clou" + + "d.dataproc.v1beta2.SparkSqlJob.Propertie" + + "sEntry\022\025\n\rjar_file_uris\0308 \003(\t\022D\n\016logging" + + "_config\030\006 \001(\0132,.google.cloud.dataproc.v1" + + "beta2.LoggingConfig\0326\n\024ScriptVariablesEn" + + "try\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017P" + + "ropertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001" + + "(\t:\0028\001B\t\n\007queries\"\363\003\n\006PigJob\022\030\n\016query_fi" + + "le_uri\030\001 \001(\tH\000\022>\n\nquery_list\030\002 \001(\0132(.goo" + + "gle.cloud.dataproc.v1beta2.QueryListH\000\022\033" + + "\n\023continue_on_failure\030\003 \001(\010\022T\n\020script_va" + + "riables\030\004 \003(\0132:.google.cloud.dataproc.v1" + + "beta2.PigJob.ScriptVariablesEntry\022I\n\npro" + + "perties\030\005 \003(\01325.google.cloud.dataproc.v1" + + "beta2.PigJob.PropertiesEntry\022\025\n\rjar_file" + + "_uris\030\006 \003(\t\022D\n\016logging_config\030\007 \001(\0132,.go" + + "ogle.cloud.dataproc.v1beta2.LoggingConfi" + + "g\0326\n\024ScriptVariablesEntry\022\013\n\003key\030\001 \001(\t\022\r" + + "\n\005value\030\002 \001(\t:\0028\001\0321\n\017PropertiesEntry\022\013\n\003" + + "key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007queries\"" + + ":\n\014JobPlacement\022\024\n\014cluster_name\030\001 \001(\t\022\024\n" + + "\014cluster_uuid\030\002 \001(\t\"\314\003\n\tJobStatus\022=\n\005sta" + + "te\030\001 \001(\0162..google.cloud.dataproc.v1beta2" + + ".JobStatus.State\022\017\n\007details\030\002 \001(\t\0224\n\020sta" + + "te_start_time\030\006 \001(\0132\032.google.protobuf.Ti" + + "mestamp\022C\n\010substate\030\007 \001(\01621.google.cloud" + + ".dataproc.v1beta2.JobStatus.Substate\"\251\001\n" + + "\005State\022\025\n\021STATE_UNSPECIFIED\020\000\022\013\n\007PENDING" + + "\020\001\022\016\n\nSETUP_DONE\020\010\022\013\n\007RUNNING\020\002\022\022\n\016CANCE" + + "L_PENDING\020\003\022\022\n\016CANCEL_STARTED\020\007\022\r\n\tCANCE" + + "LLED\020\004\022\010\n\004DONE\020\005\022\t\n\005ERROR\020\006\022\023\n\017ATTEMPT_F" + + "AILURE\020\t\"H\n\010Substate\022\017\n\013UNSPECIFIED\020\000\022\r\n" + + "\tSUBMITTED\020\001\022\n\n\006QUEUED\020\002\022\020\n\014STALE_STATUS" + + 
"\020\003\"2\n\014JobReference\022\022\n\nproject_id\030\001 \001(\t\022\016" + + "\n\006job_id\030\002 \001(\t\"\226\002\n\017YarnApplication\022\014\n\004na" + + "me\030\001 \001(\t\022C\n\005state\030\002 \001(\01624.google.cloud.d" + + "ataproc.v1beta2.YarnApplication.State\022\020\n" + + "\010progress\030\003 \001(\002\022\024\n\014tracking_url\030\004 \001(\t\"\207\001" + + "\n\005State\022\025\n\021STATE_UNSPECIFIED\020\000\022\007\n\003NEW\020\001\022" + + "\016\n\nNEW_SAVING\020\002\022\r\n\tSUBMITTED\020\003\022\014\n\010ACCEPT" + + "ED\020\004\022\013\n\007RUNNING\020\005\022\014\n\010FINISHED\020\006\022\n\n\006FAILE" + + "D\020\007\022\n\n\006KILLED\020\010\"\312\007\n\003Job\022>\n\treference\030\001 \001" + + "(\0132+.google.cloud.dataproc.v1beta2.JobRe" + + "ference\022>\n\tplacement\030\002 \001(\0132+.google.clou" + + "d.dataproc.v1beta2.JobPlacement\022>\n\nhadoo" + + "p_job\030\003 \001(\0132(.google.cloud.dataproc.v1be" + + "ta2.HadoopJobH\000\022<\n\tspark_job\030\004 \001(\0132\'.goo" + + "gle.cloud.dataproc.v1beta2.SparkJobH\000\022@\n" + + "\013pyspark_job\030\005 \001(\0132).google.cloud.datapr" + + "oc.v1beta2.PySparkJobH\000\022:\n\010hive_job\030\006 \001(" + + "\0132&.google.cloud.dataproc.v1beta2.HiveJo" + + "bH\000\0228\n\007pig_job\030\007 \001(\0132%.google.cloud.data" + + "proc.v1beta2.PigJobH\000\022C\n\rspark_sql_job\030\014" + + " \001(\0132*.google.cloud.dataproc.v1beta2.Spa" + + "rkSqlJobH\000\0228\n\006status\030\010 \001(\0132(.google.clou" + + "d.dataproc.v1beta2.JobStatus\022@\n\016status_h" + + "istory\030\r \003(\0132(.google.cloud.dataproc.v1b" + + "eta2.JobStatus\022I\n\021yarn_applications\030\t \003(" + + "\0132..google.cloud.dataproc.v1beta2.YarnAp" + + "plication\022\"\n\032driver_output_resource_uri\030" + + "\021 \001(\t\022 \n\030driver_control_files_uri\030\017 \001(\t\022" + + ">\n\006labels\030\022 \003(\0132..google.cloud.dataproc." 
+ + "v1beta2.Job.LabelsEntry\022@\n\nscheduling\030\024 " + + "\001(\0132,.google.cloud.dataproc.v1beta2.JobS" + + "cheduling\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\n\n\010type_job\".\n\rJobSched" + + "uling\022\035\n\025max_failures_per_hour\030\001 \001(\005\"{\n\020" + + "SubmitJobRequest\022\022\n\nproject_id\030\001 \001(\t\022\016\n\006" + + "region\030\003 \001(\t\022/\n\003job\030\002 \001(\0132\".google.cloud" + + ".dataproc.v1beta2.Job\022\022\n\nrequest_id\030\004 \001(" + + "\t\"C\n\rGetJobRequest\022\022\n\nproject_id\030\001 \001(\t\022\016" + + "\n\006region\030\003 \001(\t\022\016\n\006job_id\030\002 \001(\t\"\225\002\n\017ListJ" + + "obsRequest\022\022\n\nproject_id\030\001 \001(\t\022\016\n\006region" + + "\030\006 \001(\t\022\021\n\tpage_size\030\002 \001(\005\022\022\n\npage_token\030" + + "\003 \001(\t\022\024\n\014cluster_name\030\004 \001(\t\022Y\n\021job_state" + + "_matcher\030\005 \001(\0162>.google.cloud.dataproc.v" + + "1beta2.ListJobsRequest.JobStateMatcher\022\016" + + "\n\006filter\030\007 \001(\t\"6\n\017JobStateMatcher\022\007\n\003ALL" + + "\020\000\022\n\n\006ACTIVE\020\001\022\016\n\nNON_ACTIVE\020\002\"\250\001\n\020Updat" + + "eJobRequest\022\022\n\nproject_id\030\001 \001(\t\022\016\n\006regio" + + "n\030\002 \001(\t\022\016\n\006job_id\030\003 \001(\t\022/\n\003job\030\004 \001(\0132\".g" + + "oogle.cloud.dataproc.v1beta2.Job\022/\n\013upda" + + "te_mask\030\005 \001(\0132\032.google.protobuf.FieldMas" + + "k\"]\n\020ListJobsResponse\0220\n\004jobs\030\001 \003(\0132\".go" + + "ogle.cloud.dataproc.v1beta2.Job\022\027\n\017next_" + + "page_token\030\002 \001(\t\"F\n\020CancelJobRequest\022\022\n\n" + + "project_id\030\001 \001(\t\022\016\n\006region\030\003 \001(\t\022\016\n\006job_" + + "id\030\002 \001(\t\"F\n\020DeleteJobRequest\022\022\n\nproject_" + + "id\030\001 \001(\t\022\016\n\006region\030\003 \001(\t\022\016\n\006job_id\030\002 \001(\t" + + "2\213\010\n\rJobController\022\250\001\n\tSubmitJob\022/.googl" + + "e.cloud.dataproc.v1beta2.SubmitJobReques" + + "t\032\".google.cloud.dataproc.v1beta2.Job\"F\202" + + "\323\344\223\002@\";/v1beta2/projects/{project_id}/re" + + "gions/{region}/jobs:submit:\001*\022\241\001\n\006GetJob" + + "\022,.google.cloud.dataproc.v1beta2.GetJobR" + + "equest\032\".google.cloud.dataproc.v1beta2.J" + + "ob\"E\202\323\344\223\002?\022=/v1beta2/projects/{project_i" + + "d}/regions/{region}/jobs/{job_id}\022\251\001\n\010Li" + + "stJobs\022..google.cloud.dataproc.v1beta2.L" + + "istJobsRequest\032/.google.cloud.dataproc.v" + + "1beta2.ListJobsResponse\"<\202\323\344\223\0026\0224/v1beta" + + "2/projects/{project_id}/regions/{region}" + + "/jobs\022\254\001\n\tUpdateJob\022/.google.cloud.datap" + + "roc.v1beta2.UpdateJobRequest\032\".google.cl" + + "oud.dataproc.v1beta2.Job\"J\202\323\344\223\002D2=/v1bet" + + "a2/projects/{project_id}/regions/{region" + + "}/jobs/{job_id}:\003job\022\261\001\n\tCancelJob\022/.goo" + + "gle.cloud.dataproc.v1beta2.CancelJobRequ" + + "est\032\".google.cloud.dataproc.v1beta2.Job\"" + + "O\202\323\344\223\002I\"D/v1beta2/projects/{project_id}/" + + "regions/{region}/jobs/{job_id}:cancel:\001*" + + "\022\233\001\n\tDeleteJob\022/.google.cloud.dataproc.v" + + "1beta2.DeleteJobRequest\032\026.google.protobu" + + "f.Empty\"E\202\323\344\223\002?*=/v1beta2/projects/{proj" + + "ect_id}/regions/{region}/jobs/{job_id}Bw" + + "\n!com.google.cloud.dataproc.v1beta2B\tJob" + + "sProtoP\001ZEgoogle.golang.org/genproto/goo" + + 
"gleapis/cloud/dataproc/v1beta2;dataprocb" + + "\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }, assigner); + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_descriptor, + new java.lang.String[] { "DriverLogLevels", }); + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_DriverLogLevelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_DriverLogLevelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_DriverLogLevelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_descriptor, + new java.lang.String[] { "MainJarFileUri", "MainClass", "Args", "JarFileUris", "FileUris", "ArchiveUris", "Properties", "LoggingConfig", "Driver", }); + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_PropertiesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_HadoopJob_PropertiesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_SparkJob_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_dataproc_v1beta2_SparkJob_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SparkJob_descriptor, + new java.lang.String[] { "MainJarFileUri", "MainClass", "Args", "JarFileUris", "FileUris", "ArchiveUris", "Properties", "LoggingConfig", "Driver", }); + internal_static_google_cloud_dataproc_v1beta2_SparkJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_SparkJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_SparkJob_PropertiesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SparkJob_PropertiesEntry_descriptor, + new 
java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_descriptor, + new java.lang.String[] { "MainPythonFileUri", "Args", "PythonFileUris", "JarFileUris", "FileUris", "ArchiveUris", "Properties", "LoggingConfig", }); + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_PropertiesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_PySparkJob_PropertiesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_QueryList_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_dataproc_v1beta2_QueryList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_QueryList_descriptor, + new java.lang.String[] { "Queries", }); + internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_dataproc_v1beta2_HiveJob_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor, + new java.lang.String[] { "QueryFileUri", "QueryList", "ContinueOnFailure", "ScriptVariables", "Properties", "JarFileUris", "Queries", }); + internal_static_google_cloud_dataproc_v1beta2_HiveJob_ScriptVariablesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_HiveJob_ScriptVariablesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_HiveJob_ScriptVariablesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_HiveJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_HiveJob_descriptor.getNestedTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_HiveJob_PropertiesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_HiveJob_PropertiesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor, + new java.lang.String[] { "QueryFileUri", "QueryList", "ScriptVariables", "Properties", "JarFileUris", "LoggingConfig", "Queries", }); + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_ScriptVariablesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_ScriptVariablesEntry_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_ScriptVariablesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor.getNestedTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_PropertiesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_PropertiesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_dataproc_v1beta2_PigJob_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor, + new java.lang.String[] { "QueryFileUri", "QueryList", "ContinueOnFailure", "ScriptVariables", "Properties", "JarFileUris", "LoggingConfig", "Queries", }); + internal_static_google_cloud_dataproc_v1beta2_PigJob_ScriptVariablesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_PigJob_ScriptVariablesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_PigJob_ScriptVariablesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_PigJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor.getNestedTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_PigJob_PropertiesEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_PigJob_PropertiesEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_JobPlacement_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_dataproc_v1beta2_JobPlacement_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_JobPlacement_descriptor, + new java.lang.String[] { "ClusterName", "ClusterUuid", }); + internal_static_google_cloud_dataproc_v1beta2_JobStatus_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_dataproc_v1beta2_JobStatus_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_JobStatus_descriptor, + new java.lang.String[] { "State", "Details", "StateStartTime", "Substate", }); + internal_static_google_cloud_dataproc_v1beta2_JobReference_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_dataproc_v1beta2_JobReference_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_JobReference_descriptor, + new java.lang.String[] { "ProjectId", "JobId", }); + internal_static_google_cloud_dataproc_v1beta2_YarnApplication_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_dataproc_v1beta2_YarnApplication_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_YarnApplication_descriptor, + new java.lang.String[] { "Name", "State", "Progress", "TrackingUrl", }); + internal_static_google_cloud_dataproc_v1beta2_Job_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_dataproc_v1beta2_Job_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_Job_descriptor, + new java.lang.String[] { "Reference", "Placement", "HadoopJob", "SparkJob", "PysparkJob", "HiveJob", "PigJob", "SparkSqlJob", "Status", "StatusHistory", "YarnApplications", "DriverOutputResourceUri", "DriverControlFilesUri", "Labels", "Scheduling", "TypeJob", }); + internal_static_google_cloud_dataproc_v1beta2_Job_LabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_Job_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_Job_LabelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_Job_LabelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_JobScheduling_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_dataproc_v1beta2_JobScheduling_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_JobScheduling_descriptor, + new java.lang.String[] { "MaxFailuresPerHour", }); + internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "Job", "RequestId", }); + internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_GetJobRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "JobId", }); + internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "PageSize", "PageToken", "ClusterName", "JobStateMatcher", "Filter", }); + internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "JobId", "Job", "UpdateMask", }); + internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_descriptor = + getDescriptor().getMessageTypes().get(18); + 
internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_descriptor, + new java.lang.String[] { "Jobs", "NextPageToken", }); + internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "JobId", }); + internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_descriptor = + getDescriptor().getMessageTypes().get(20); + internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_DeleteJobRequest_descriptor, + new java.lang.String[] { "ProjectId", "Region", "JobId", }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.AnnotationsProto.http); + com.google.protobuf.Descriptors.FileDescriptor + .internalUpdateFileDescriptor(descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfig.java new file mode 100644 index 000000000000..c66339584d8d --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfig.java @@ -0,0 +1,1247 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Specifies the cluster auto-delete schedule configuration.
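+ *
+ * A minimal builder sketch (hypothetical values; it relies only on the
+ * setters generated in this file and on com.google.protobuf.Duration):
+ *
+ *   LifecycleConfig config = LifecycleConfig.newBuilder()
+ *       .setIdleDeleteTtl(com.google.protobuf.Duration.newBuilder()
+ *           .setSeconds(3600))       // auto-delete after 1 hour idle
+ *       .setAutoDeleteTtl(com.google.protobuf.Duration.newBuilder()
+ *           .setSeconds(86400))      // hard lifetime cap of 24 hours
+ *       .build();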
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.LifecycleConfig} + */ +public final class LifecycleConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.LifecycleConfig) + LifecycleConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use LifecycleConfig.newBuilder() to construct. + private LifecycleConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private LifecycleConfig() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LifecycleConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + com.google.protobuf.Duration.Builder subBuilder = null; + if (idleDeleteTtl_ != null) { + subBuilder = idleDeleteTtl_.toBuilder(); + } + idleDeleteTtl_ = input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(idleDeleteTtl_); + idleDeleteTtl_ = subBuilder.buildPartial(); + } + + break; + } + case 18: { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (ttlCase_ == 2) { + subBuilder = ((com.google.protobuf.Timestamp) ttl_).toBuilder(); + } + ttl_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.protobuf.Timestamp) ttl_); + ttl_ = subBuilder.buildPartial(); + } + ttlCase_ = 2; + break; + } + case 26: { + com.google.protobuf.Duration.Builder subBuilder = null; + if (ttlCase_ == 3) { + subBuilder = ((com.google.protobuf.Duration) ttl_).toBuilder(); + } + ttl_ = + input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.protobuf.Duration) ttl_); + ttl_ = subBuilder.buildPartial(); + } + ttlCase_ = 3; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.LifecycleConfig.class, 
com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder.class); + } + + private int ttlCase_ = 0; + private java.lang.Object ttl_; + public enum TtlCase + implements com.google.protobuf.Internal.EnumLite { + AUTO_DELETE_TIME(2), + AUTO_DELETE_TTL(3), + TTL_NOT_SET(0); + private final int value; + private TtlCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TtlCase valueOf(int value) { + return forNumber(value); + } + + public static TtlCase forNumber(int value) { + switch (value) { + case 2: return AUTO_DELETE_TIME; + case 3: return AUTO_DELETE_TTL; + case 0: return TTL_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public TtlCase + getTtlCase() { + return TtlCase.forNumber( + ttlCase_); + } + + public static final int IDLE_DELETE_TTL_FIELD_NUMBER = 1; + private com.google.protobuf.Duration idleDeleteTtl_; + /** + *
+   * Optional. The longest duration the cluster will be kept alive while
+   * idle; passing this threshold will cause the cluster to be auto-deleted.
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public boolean hasIdleDeleteTtl() { + return idleDeleteTtl_ != null; + } + /** + *
+   * Optional. The longest duration the cluster will be kept alive while
+   * idle; passing this threshold will cause the cluster to be auto-deleted.
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public com.google.protobuf.Duration getIdleDeleteTtl() { + return idleDeleteTtl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : idleDeleteTtl_; + } + /** + *
+   * Optional. The longest duration the cluster will be kept alive while
+   * idle; passing this threshold will cause the cluster to be auto-deleted.
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public com.google.protobuf.DurationOrBuilder getIdleDeleteTtlOrBuilder() { + return getIdleDeleteTtl(); + } + + public static final int AUTO_DELETE_TIME_FIELD_NUMBER = 2; + /** + *
+   * Optional. The time when the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public boolean hasAutoDeleteTime() { + return ttlCase_ == 2; + } + /** + *
+   * Optional. The time when the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public com.google.protobuf.Timestamp getAutoDeleteTime() { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + /** + *
+   * Optional. The time when the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getAutoDeleteTimeOrBuilder() { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + public static final int AUTO_DELETE_TTL_FIELD_NUMBER = 3; + /** + *
+   * Optional. The lifetime duration of the cluster; the cluster will be
+   * auto-deleted at the end of this duration.
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public boolean hasAutoDeleteTtl() { + return ttlCase_ == 3; + } + /** + *
+   * Optional. The lifetime duration of the cluster; the cluster will be
+   * auto-deleted at the end of this duration.
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public com.google.protobuf.Duration getAutoDeleteTtl() { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + /** + *
+   * Optional. The lifetime duration of the cluster; the cluster will be
+   * auto-deleted at the end of this duration.
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public com.google.protobuf.DurationOrBuilder getAutoDeleteTtlOrBuilder() { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (idleDeleteTtl_ != null) { + output.writeMessage(1, getIdleDeleteTtl()); + } + if (ttlCase_ == 2) { + output.writeMessage(2, (com.google.protobuf.Timestamp) ttl_); + } + if (ttlCase_ == 3) { + output.writeMessage(3, (com.google.protobuf.Duration) ttl_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (idleDeleteTtl_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getIdleDeleteTtl()); + } + if (ttlCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, (com.google.protobuf.Timestamp) ttl_); + } + if (ttlCase_ == 3) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, (com.google.protobuf.Duration) ttl_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.LifecycleConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.LifecycleConfig other = (com.google.cloud.dataproc.v1beta2.LifecycleConfig) obj; + + boolean result = true; + result = result && (hasIdleDeleteTtl() == other.hasIdleDeleteTtl()); + if (hasIdleDeleteTtl()) { + result = result && getIdleDeleteTtl() + .equals(other.getIdleDeleteTtl()); + } + result = result && getTtlCase().equals( + other.getTtlCase()); + if (!result) return false; + switch (ttlCase_) { + case 2: + result = result && getAutoDeleteTime() + .equals(other.getAutoDeleteTime()); + break; + case 3: + result = result && getAutoDeleteTtl() + .equals(other.getAutoDeleteTtl()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasIdleDeleteTtl()) { + hash = (37 * hash) + IDLE_DELETE_TTL_FIELD_NUMBER; + hash = (53 * hash) + getIdleDeleteTtl().hashCode(); + } + switch (ttlCase_) { + case 2: + hash = (37 * hash) + AUTO_DELETE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getAutoDeleteTime().hashCode(); + break; + case 3: + hash = (37 * hash) + AUTO_DELETE_TTL_FIELD_NUMBER; + hash = (53 * hash) + getAutoDeleteTtl().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.LifecycleConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Specifies the cluster auto-delete schedule configuration.
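+   *
+   * Because auto_delete_time and auto_delete_ttl share the ttl oneof, a
+   * sketch like the following (hypothetical values) ends with only the
+   * last setter's case populated:
+   *
+   *   LifecycleConfig.Builder b = LifecycleConfig.newBuilder()
+   *       .setAutoDeleteTime(com.google.protobuf.Timestamp.newBuilder()
+   *           .setSeconds(1735689600))
+   *       .setAutoDeleteTtl(com.google.protobuf.Duration.newBuilder()
+   *           .setSeconds(86400));
+   *   // b.getTtlCase() == TtlCase.AUTO_DELETE_TTL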
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.LifecycleConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.LifecycleConfig) + com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.LifecycleConfig.class, com.google.cloud.dataproc.v1beta2.LifecycleConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.LifecycleConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtl_ = null; + } else { + idleDeleteTtl_ = null; + idleDeleteTtlBuilder_ = null; + } + ttlCase_ = 0; + ttl_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_LifecycleConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LifecycleConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.LifecycleConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LifecycleConfig build() { + com.google.cloud.dataproc.v1beta2.LifecycleConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LifecycleConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.LifecycleConfig result = new com.google.cloud.dataproc.v1beta2.LifecycleConfig(this); + if (idleDeleteTtlBuilder_ == null) { + result.idleDeleteTtl_ = idleDeleteTtl_; + } else { + result.idleDeleteTtl_ = idleDeleteTtlBuilder_.build(); + } + if (ttlCase_ == 2) { + if (autoDeleteTimeBuilder_ == null) { + result.ttl_ = ttl_; + } else { + result.ttl_ = autoDeleteTimeBuilder_.build(); + } + } + if (ttlCase_ == 3) { + if (autoDeleteTtlBuilder_ == null) { + result.ttl_ = ttl_; + } else { + result.ttl_ = autoDeleteTtlBuilder_.build(); + } + } + result.ttlCase_ = ttlCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) 
super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.LifecycleConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.LifecycleConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.LifecycleConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.LifecycleConfig.getDefaultInstance()) return this; + if (other.hasIdleDeleteTtl()) { + mergeIdleDeleteTtl(other.getIdleDeleteTtl()); + } + switch (other.getTtlCase()) { + case AUTO_DELETE_TIME: { + mergeAutoDeleteTime(other.getAutoDeleteTime()); + break; + } + case AUTO_DELETE_TTL: { + mergeAutoDeleteTtl(other.getAutoDeleteTtl()); + break; + } + case TTL_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.LifecycleConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.LifecycleConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int ttlCase_ = 0; + private java.lang.Object ttl_; + public TtlCase + getTtlCase() { + return TtlCase.forNumber( + ttlCase_); + } + + public Builder clearTtl() { + ttlCase_ = 0; + ttl_ = null; + onChanged(); + return this; + } + + + private com.google.protobuf.Duration idleDeleteTtl_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> idleDeleteTtlBuilder_; + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public boolean hasIdleDeleteTtl() { + return idleDeleteTtlBuilder_ != null || idleDeleteTtl_ != null; + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public com.google.protobuf.Duration getIdleDeleteTtl() { + if (idleDeleteTtlBuilder_ == null) { + return idleDeleteTtl_ == null ? com.google.protobuf.Duration.getDefaultInstance() : idleDeleteTtl_; + } else { + return idleDeleteTtlBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public Builder setIdleDeleteTtl(com.google.protobuf.Duration value) { + if (idleDeleteTtlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + idleDeleteTtl_ = value; + onChanged(); + } else { + idleDeleteTtlBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public Builder setIdleDeleteTtl( + com.google.protobuf.Duration.Builder builderForValue) { + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtl_ = builderForValue.build(); + onChanged(); + } else { + idleDeleteTtlBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public Builder mergeIdleDeleteTtl(com.google.protobuf.Duration value) { + if (idleDeleteTtlBuilder_ == null) { + if (idleDeleteTtl_ != null) { + idleDeleteTtl_ = + com.google.protobuf.Duration.newBuilder(idleDeleteTtl_).mergeFrom(value).buildPartial(); + } else { + idleDeleteTtl_ = value; + } + onChanged(); + } else { + idleDeleteTtlBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public Builder clearIdleDeleteTtl() { + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtl_ = null; + onChanged(); + } else { + idleDeleteTtl_ = null; + idleDeleteTtlBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public com.google.protobuf.Duration.Builder getIdleDeleteTtlBuilder() { + + onChanged(); + return getIdleDeleteTtlFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + public com.google.protobuf.DurationOrBuilder getIdleDeleteTtlOrBuilder() { + if (idleDeleteTtlBuilder_ != null) { + return idleDeleteTtlBuilder_.getMessageOrBuilder(); + } else { + return idleDeleteTtl_ == null ? + com.google.protobuf.Duration.getDefaultInstance() : idleDeleteTtl_; + } + } + /** + *
+     * Optional. The longest duration the cluster will be kept alive while
+     * idle; passing this threshold will cause the cluster to be auto-deleted.
+     * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> + getIdleDeleteTtlFieldBuilder() { + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtlBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder>( + getIdleDeleteTtl(), + getParentForChildren(), + isClean()); + idleDeleteTtl_ = null; + } + return idleDeleteTtlBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> autoDeleteTimeBuilder_; + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public boolean hasAutoDeleteTime() { + return ttlCase_ == 2; + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public com.google.protobuf.Timestamp getAutoDeleteTime() { + if (autoDeleteTimeBuilder_ == null) { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } else { + if (ttlCase_ == 2) { + return autoDeleteTimeBuilder_.getMessage(); + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public Builder setAutoDeleteTime(com.google.protobuf.Timestamp value) { + if (autoDeleteTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ttl_ = value; + onChanged(); + } else { + autoDeleteTimeBuilder_.setMessage(value); + } + ttlCase_ = 2; + return this; + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public Builder setAutoDeleteTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (autoDeleteTimeBuilder_ == null) { + ttl_ = builderForValue.build(); + onChanged(); + } else { + autoDeleteTimeBuilder_.setMessage(builderForValue.build()); + } + ttlCase_ = 2; + return this; + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public Builder mergeAutoDeleteTime(com.google.protobuf.Timestamp value) { + if (autoDeleteTimeBuilder_ == null) { + if (ttlCase_ == 2 && + ttl_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + ttl_ = com.google.protobuf.Timestamp.newBuilder((com.google.protobuf.Timestamp) ttl_) + .mergeFrom(value).buildPartial(); + } else { + ttl_ = value; + } + onChanged(); + } else { + if (ttlCase_ == 2) { + autoDeleteTimeBuilder_.mergeFrom(value); + } + autoDeleteTimeBuilder_.setMessage(value); + } + ttlCase_ = 2; + return this; + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public Builder clearAutoDeleteTime() { + if (autoDeleteTimeBuilder_ == null) { + if (ttlCase_ == 2) { + ttlCase_ = 0; + ttl_ = null; + onChanged(); + } + } else { + if (ttlCase_ == 2) { + ttlCase_ = 0; + ttl_ = null; + } + autoDeleteTimeBuilder_.clear(); + } + return this; + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getAutoDeleteTimeBuilder() { + return getAutoDeleteTimeFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getAutoDeleteTimeOrBuilder() { + if ((ttlCase_ == 2) && (autoDeleteTimeBuilder_ != null)) { + return autoDeleteTimeBuilder_.getMessageOrBuilder(); + } else { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + /** + *
+     * Optional. The time when the cluster will be auto-deleted.
+     * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> + getAutoDeleteTimeFieldBuilder() { + if (autoDeleteTimeBuilder_ == null) { + if (!(ttlCase_ == 2)) { + ttl_ = com.google.protobuf.Timestamp.getDefaultInstance(); + } + autoDeleteTimeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( + (com.google.protobuf.Timestamp) ttl_, + getParentForChildren(), + isClean()); + ttl_ = null; + } + ttlCase_ = 2; + onChanged();; + return autoDeleteTimeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> autoDeleteTtlBuilder_; + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public boolean hasAutoDeleteTtl() { + return ttlCase_ == 3; + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public com.google.protobuf.Duration getAutoDeleteTtl() { + if (autoDeleteTtlBuilder_ == null) { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } else { + if (ttlCase_ == 3) { + return autoDeleteTtlBuilder_.getMessage(); + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public Builder setAutoDeleteTtl(com.google.protobuf.Duration value) { + if (autoDeleteTtlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ttl_ = value; + onChanged(); + } else { + autoDeleteTtlBuilder_.setMessage(value); + } + ttlCase_ = 3; + return this; + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public Builder setAutoDeleteTtl( + com.google.protobuf.Duration.Builder builderForValue) { + if (autoDeleteTtlBuilder_ == null) { + ttl_ = builderForValue.build(); + onChanged(); + } else { + autoDeleteTtlBuilder_.setMessage(builderForValue.build()); + } + ttlCase_ = 3; + return this; + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public Builder mergeAutoDeleteTtl(com.google.protobuf.Duration value) { + if (autoDeleteTtlBuilder_ == null) { + if (ttlCase_ == 3 && + ttl_ != com.google.protobuf.Duration.getDefaultInstance()) { + ttl_ = com.google.protobuf.Duration.newBuilder((com.google.protobuf.Duration) ttl_) + .mergeFrom(value).buildPartial(); + } else { + ttl_ = value; + } + onChanged(); + } else { + if (ttlCase_ == 3) { + autoDeleteTtlBuilder_.mergeFrom(value); + } + autoDeleteTtlBuilder_.setMessage(value); + } + ttlCase_ = 3; + return this; + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public Builder clearAutoDeleteTtl() { + if (autoDeleteTtlBuilder_ == null) { + if (ttlCase_ == 3) { + ttlCase_ = 0; + ttl_ = null; + onChanged(); + } + } else { + if (ttlCase_ == 3) { + ttlCase_ = 0; + ttl_ = null; + } + autoDeleteTtlBuilder_.clear(); + } + return this; + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public com.google.protobuf.Duration.Builder getAutoDeleteTtlBuilder() { + return getAutoDeleteTtlFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + public com.google.protobuf.DurationOrBuilder getAutoDeleteTtlOrBuilder() { + if ((ttlCase_ == 3) && (autoDeleteTtlBuilder_ != null)) { + return autoDeleteTtlBuilder_.getMessageOrBuilder(); + } else { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + /** + *
+     * Optional. The lifetime duration of the cluster; the cluster will be
+     * auto-deleted at the end of this duration.
+     * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> + getAutoDeleteTtlFieldBuilder() { + if (autoDeleteTtlBuilder_ == null) { + if (!(ttlCase_ == 3)) { + ttl_ = com.google.protobuf.Duration.getDefaultInstance(); + } + autoDeleteTtlBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder>( + (com.google.protobuf.Duration) ttl_, + getParentForChildren(), + isClean()); + ttl_ = null; + } + ttlCase_ = 3; + onChanged();; + return autoDeleteTtlBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.LifecycleConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.LifecycleConfig) + private static final com.google.cloud.dataproc.v1beta2.LifecycleConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.LifecycleConfig(); + } + + public static com.google.cloud.dataproc.v1beta2.LifecycleConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public LifecycleConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LifecycleConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LifecycleConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfigOrBuilder.java new file mode 100644 index 000000000000..752d81517534 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfigOrBuilder.java @@ -0,0 +1,92 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface LifecycleConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.LifecycleConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. The longest duration the cluster may remain idle; once this
+   * threshold is passed, the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + boolean hasIdleDeleteTtl(); + /** + *
+   * Optional. The longest duration the cluster may remain idle; once this
+   * threshold is passed, the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + com.google.protobuf.Duration getIdleDeleteTtl(); + /** + *
+   * Optional. The longest duration the cluster may remain idle; once this
+   * threshold is passed, the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1; + */ + com.google.protobuf.DurationOrBuilder getIdleDeleteTtlOrBuilder(); + + /** + *
+   * Optional. The time when the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + boolean hasAutoDeleteTime(); + /** + *
+   * Optional. The time when the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + com.google.protobuf.Timestamp getAutoDeleteTime(); + /** + *
+   * Optional. The time when the cluster will be auto-deleted.
+   * 
+ * + * .google.protobuf.Timestamp auto_delete_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getAutoDeleteTimeOrBuilder(); + + /** + *
+   * Optional. The lifetime duration of the cluster; the cluster will be
+   * auto-deleted at the end of this duration.
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + boolean hasAutoDeleteTtl(); + /** + *
+   * Optional. The lifetime duration of the cluster; the cluster will be
+   * auto-deleted at the end of this duration.
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + com.google.protobuf.Duration getAutoDeleteTtl(); + /** + *
+   * Optional. The lifetime duration of the cluster; the cluster will be
+   * auto-deleted at the end of this duration.
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3; + */ + com.google.protobuf.DurationOrBuilder getAutoDeleteTtlOrBuilder(); + + public com.google.cloud.dataproc.v1beta2.LifecycleConfig.TtlCase getTtlCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequest.java new file mode 100644 index 000000000000..e90416531d4c --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequest.java @@ -0,0 +1,1221 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
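As the `ttl` oneof above indicates, `auto_delete_time` and `auto_delete_ttl` are mutually exclusive: setting one clears the other, and `getTtlCase()` reports which member is populated. A minimal sketch against the generated builder API in this patch (the wrapper class name is illustrative; `com.google.protobuf.Duration` is part of protobuf-java):

    import com.google.cloud.dataproc.v1beta2.LifecycleConfig;
    import com.google.protobuf.Duration;

    public class LifecycleConfigExample {
      public static void main(String[] args) {
        // Idle-delete after 30 minutes; hard auto-delete 8 hours after creation.
        LifecycleConfig config = LifecycleConfig.newBuilder()
            .setIdleDeleteTtl(Duration.newBuilder().setSeconds(30 * 60))
            .setAutoDeleteTtl(Duration.newBuilder().setSeconds(8 * 60 * 60))
            .build();

        // Only one member of the ttl oneof can be set at a time.
        System.out.println(config.getTtlCase());        // AUTO_DELETE_TTL
        System.out.println(config.hasAutoDeleteTime()); // false
      }
    }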
+ * A request to list the clusters in a project.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListClustersRequest} + */ +public final class ListClustersRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ListClustersRequest) + ListClustersRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListClustersRequest.newBuilder() to construct. + private ListClustersRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListClustersRequest() { + projectId_ = ""; + region_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListClustersRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 16: { + + pageSize_ = input.readInt32(); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + pageToken_ = s; + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 42: { + java.lang.String s = input.readStringRequireUtf8(); + + filter_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListClustersRequest.class, com.google.cloud.dataproc.v1beta2.ListClustersRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 4; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 4; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 4; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 5; + private volatile java.lang.Object filter_; + /** + *
+   * Optional. A filter constraining the clusters to list. Filters are
+   * case-sensitive and have the following syntax:
+   * field = value [AND [field = value]] ...
+   * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+   * and `[KEY]` is a label key. **value** can be `*` to match all values.
+   * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+   * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+   * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+   * contains the `DELETING` and `ERROR` states.
+   * `clusterName` is the name of the cluster provided at creation time.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND clusterName = mycluster
+   * AND labels.env = staging AND labels.starred = *
+   * 
+ * + * string filter = 5; + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + /** + *
+   * Optional. A filter constraining the clusters to list. Filters are
+   * case-sensitive and have the following syntax:
+   * field = value [AND [field = value]] ...
+   * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+   * and `[KEY]` is a label key. **value** can be `*` to match all values.
+   * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+   * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+   * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+   * contains the `DELETING` and `ERROR` states.
+   * `clusterName` is the name of the cluster provided at creation time.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND clusterName = mycluster
+   * AND labels.env = staging AND labels.starred = *
+   * 
+ * + * string filter = 5; + */ + public com.google.protobuf.ByteString + getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_; + /** + *
+   * Optional. The standard List page size.
+   * 
+ * + * int32 page_size = 2; + */ + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + private volatile java.lang.Object pageToken_; + /** + *
+   * Optional. The standard List page token.
+   * 
+ * + * string page_token = 3; + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + /** + *
+   * Optional. The standard List page token.
+   * 
+ * + * string page_token = 3; + */ + public com.google.protobuf.ByteString + getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!getPageTokenBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pageToken_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, region_); + } + if (!getFilterBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, filter_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, pageSize_); + } + if (!getPageTokenBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, pageToken_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, region_); + } + if (!getFilterBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, filter_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ListClustersRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ListClustersRequest other = (com.google.cloud.dataproc.v1beta2.ListClustersRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getFilter() + .equals(other.getFilter()); + result = result && (getPageSize() + == other.getPageSize()); + result = result && getPageToken() + .equals(other.getPageToken()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + 
PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static 
Builder newBuilder(com.google.cloud.dataproc.v1beta2.ListClustersRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to list the clusters in a project.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListClustersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ListClustersRequest) + com.google.cloud.dataproc.v1beta2.ListClustersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListClustersRequest.class, com.google.cloud.dataproc.v1beta2.ListClustersRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ListClustersRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + filter_ = ""; + + pageSize_ = 0; + + pageToken_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ListClustersRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersRequest build() { + com.google.cloud.dataproc.v1beta2.ListClustersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.ListClustersRequest result = new com.google.cloud.dataproc.v1beta2.ListClustersRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.filter_ = filter_; + result.pageSize_ = pageSize_; + result.pageToken_ = pageToken_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return 
(Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ListClustersRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ListClustersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ListClustersRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.ListClustersRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ListClustersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ListClustersRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the cluster
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 4; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 4; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 4; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 4; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 4; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + /** + *
+     * Optional. A filter constraining the clusters to list. Filters are
+     * case-sensitive and have the following syntax:
+     * field = value [AND [field = value]] ...
+     * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+     * and `[KEY]` is a label key. **value** can be `*` to match all values.
+     * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+     * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+     * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+     * contains the `DELETING` and `ERROR` states.
+     * `clusterName` is the name of the cluster provided at creation time.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND clusterName = mycluster
+     * AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 5; + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A filter constraining the clusters to list. Filters are
+     * case-sensitive and have the following syntax:
+     * field = value [AND [field = value]] ...
+     * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+     * and `[KEY]` is a label key. **value** can be `*` to match all values.
+     * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+     * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+     * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+     * contains the `DELETING` and `ERROR` states.
+     * `clusterName` is the name of the cluster provided at creation time.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND clusterName = mycluster
+     * AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 5; + */ + public com.google.protobuf.ByteString + getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A filter constraining the clusters to list. Filters are
+     * case-sensitive and have the following syntax:
+     * field = value [AND [field = value]] ...
+     * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+     * and `[KEY]` is a label key. **value** can be `*` to match all values.
+     * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+     * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+     * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+     * contains the `DELETING` and `ERROR` states.
+     * `clusterName` is the name of the cluster provided at creation time.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND clusterName = mycluster
+     * AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 5; + */ + public Builder setFilter( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + filter_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A filter constraining the clusters to list. Filters are
+     * case-sensitive and have the following syntax:
+     * field = value [AND [field = value]] ...
+     * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+     * and `[KEY]` is a label key. **value** can be `*` to match all values.
+     * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+     * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+     * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+     * contains the `DELETING` and `ERROR` states.
+     * `clusterName` is the name of the cluster provided at creation time.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND clusterName = mycluster
+     * AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 5; + */ + public Builder clearFilter() { + + filter_ = getDefaultInstance().getFilter(); + onChanged(); + return this; + } + /** + *
+     * Optional. A filter constraining the clusters to list. Filters are
+     * case-sensitive and have the following syntax:
+     * field = value [AND [field = value]] ...
+     * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+     * and `[KEY]` is a label key. **value** can be `*` to match all values.
+     * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+     * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+     * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+     * contains the `DELETING` and `ERROR` states.
+     * `clusterName` is the name of the cluster provided at creation time.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND clusterName = mycluster
+     * AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 5; + */ + public Builder setFilterBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + filter_ = value; + onChanged(); + return this; + } + + private int pageSize_ ; + /** + *
+     * Optional. The standard List page size.
+     * 
+ * + * int32 page_size = 2; + */ + public int getPageSize() { + return pageSize_; + } + /** + *
+     * Optional. The standard List page size.
+     * 
+ * + * int32 page_size = 2; + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The standard List page size.
+     * 
+ * + * int32 page_size = 2; + */ + public Builder clearPageSize() { + + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + /** + *
+     * Optional. The standard List page token.
+     * 
+ * + * string page_token = 3; + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The standard List page token.
+     * 
+ * + * string page_token = 3; + */ + public com.google.protobuf.ByteString + getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The standard List page token.
+     * 
+ * + * string page_token = 3; + */ + public Builder setPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + pageToken_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The standard List page token.
+     * 
+ * + * string page_token = 3; + */ + public Builder clearPageToken() { + + pageToken_ = getDefaultInstance().getPageToken(); + onChanged(); + return this; + } + /** + *
+     * Optional. The standard List page token.
+     * 
+ * + * string page_token = 3; + */ + public Builder setPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + pageToken_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ListClustersRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListClustersRequest) + private static final com.google.cloud.dataproc.v1beta2.ListClustersRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ListClustersRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.ListClustersRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListClustersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListClustersRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequestOrBuilder.java new file mode 100644 index 000000000000..ce18f7a88cb6 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequestOrBuilder.java @@ -0,0 +1,120 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ListClustersRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ListClustersRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
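To make the filter grammar above concrete, a short sketch that assembles a `ListClustersRequest` through the generated builder (the project ID and label values are placeholders):

    import com.google.cloud.dataproc.v1beta2.ListClustersRequest;

    public class ListClustersRequestExample {
      public static void main(String[] args) {
        // Space separation implies AND, so this matches active clusters
        // labeled env=staging that carry any value for the `starred` label.
        ListClustersRequest request = ListClustersRequest.newBuilder()
            .setProjectId("my-project")
            .setRegion("global")
            .setFilter("status.state = ACTIVE AND labels.env = staging"
                + " AND labels.starred = *")
            .setPageSize(50) // standard List page size
            .build();
        System.out.println(request);
      }
    }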
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the cluster
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 4; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 4; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Optional. A filter constraining the clusters to list. Filters are
+   * case-sensitive and have the following syntax:
+   * field = value [AND [field = value]] ...
+   * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+   * and `[KEY]` is a label key. **value** can be `*` to match all values.
+   * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+   * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+   * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+   * contains the `DELETING` and `ERROR` states.
+   * `clusterName` is the name of the cluster provided at creation time.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND clusterName = mycluster
+   * AND labels.env = staging AND labels.starred = *
+   * 
+ * + * string filter = 5; + */ + java.lang.String getFilter(); + /** + *
+   * Optional. A filter constraining the clusters to list. Filters are
+   * case-sensitive and have the following syntax:
+   * field = value [AND [field = value]] ...
+   * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
+   * and `[KEY]` is a label key. **value** can be `*` to match all values.
+   * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
+   * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
+   * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
+   * contains the `DELETING` and `ERROR` states.
+   * `clusterName` is the name of the cluster provided at creation time.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND clusterName = mycluster
+   * AND labels.env = staging AND labels.starred = *
+   * 
+ * + * string filter = 5; + */ + com.google.protobuf.ByteString + getFilterBytes(); + + /** + *
+   * Optional. The standard List page size.
+   * 
+ * + * int32 page_size = 2; + */ + int getPageSize(); + + /** + *
+   * Optional. The standard List page token.
+   * 
+ * + * string page_token = 3; + */ + java.lang.String getPageToken(); + /** + *
+   * Optional. The standard List page token.
+   * 
+ * + * string page_token = 3; + */ + com.google.protobuf.ByteString + getPageTokenBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponse.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponse.java new file mode 100644 index 000000000000..573f089c6057 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponse.java @@ -0,0 +1,1031 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * The list of all clusters in a project.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListClustersResponse} + */ +public final class ListClustersResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ListClustersResponse) + ListClustersResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListClustersResponse.newBuilder() to construct. + private ListClustersResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListClustersResponse() { + clusters_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListClustersResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + clusters_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + clusters_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.Cluster.parser(), extensionRegistry)); + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + nextPageToken_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + clusters_ = java.util.Collections.unmodifiableList(clusters_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListClustersResponse.class, com.google.cloud.dataproc.v1beta2.ListClustersResponse.Builder.class); + } + + private int bitField0_; + public static final int CLUSTERS_FIELD_NUMBER = 1; + private java.util.List clusters_; + /** + *
+   * Output only. The clusters in the project.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public java.util.List getClustersList() { + return clusters_; + } + /** + *
+   * Output only. The clusters in the project.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public java.util.List + getClustersOrBuilderList() { + return clusters_; + } + /** + *
+   * Output only. The clusters in the project.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public int getClustersCount() { + return clusters_.size(); + } + /** + *
+   * Output only. The clusters in the project.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public com.google.cloud.dataproc.v1beta2.Cluster getClusters(int index) { + return clusters_.get(index); + } + /** + *
+   * Output only. The clusters in the project.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClustersOrBuilder( + int index) { + return clusters_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + private volatile java.lang.Object nextPageToken_; + /** + *
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent `ListClustersRequest`.
+   * 
+ * + * string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + /** + *
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent `ListClustersRequest`.
+   * 
+ * + * string next_page_token = 2; + */ + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < clusters_.size(); i++) { + output.writeMessage(1, clusters_.get(i)); + } + if (!getNextPageTokenBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < clusters_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, clusters_.get(i)); + } + if (!getNextPageTokenBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ListClustersResponse)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ListClustersResponse other = (com.google.cloud.dataproc.v1beta2.ListClustersResponse) obj; + + boolean result = true; + result = result && getClustersList() + .equals(other.getClustersList()); + result = result && getNextPageToken() + .equals(other.getNextPageToken()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getClustersCount() > 0) { + hash = (37 * hash) + CLUSTERS_FIELD_NUMBER; + hash = (53 * hash) + getClustersList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ListClustersResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The list of all clusters in a project.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListClustersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ListClustersResponse) + com.google.cloud.dataproc.v1beta2.ListClustersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListClustersResponse.class, com.google.cloud.dataproc.v1beta2.ListClustersResponse.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ListClustersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getClustersFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (clustersBuilder_ == null) { + clusters_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + clustersBuilder_.clear(); + } + nextPageToken_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ListClustersResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersResponse getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ListClustersResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersResponse build() { + com.google.cloud.dataproc.v1beta2.ListClustersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersResponse buildPartial() { + com.google.cloud.dataproc.v1beta2.ListClustersResponse result = new com.google.cloud.dataproc.v1beta2.ListClustersResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (clustersBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + clusters_ = java.util.Collections.unmodifiableList(clusters_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.clusters_ = clusters_; + } else { + result.clusters_ = clustersBuilder_.build(); + } + result.nextPageToken_ = nextPageToken_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + 
com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ListClustersResponse) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ListClustersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ListClustersResponse other) { + if (other == com.google.cloud.dataproc.v1beta2.ListClustersResponse.getDefaultInstance()) return this; + if (clustersBuilder_ == null) { + if (!other.clusters_.isEmpty()) { + if (clusters_.isEmpty()) { + clusters_ = other.clusters_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureClustersIsMutable(); + clusters_.addAll(other.clusters_); + } + onChanged(); + } + } else { + if (!other.clusters_.isEmpty()) { + if (clustersBuilder_.isEmpty()) { + clustersBuilder_.dispose(); + clustersBuilder_ = null; + clusters_ = other.clusters_; + bitField0_ = (bitField0_ & ~0x00000001); + clustersBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getClustersFieldBuilder() : null; + } else { + clustersBuilder_.addAllMessages(other.clusters_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ListClustersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ListClustersResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List clusters_ = + java.util.Collections.emptyList(); + private void ensureClustersIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + clusters_ = new java.util.ArrayList(clusters_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder> clustersBuilder_; + + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public java.util.List getClustersList() { + if (clustersBuilder_ == null) { + return java.util.Collections.unmodifiableList(clusters_); + } else { + return clustersBuilder_.getMessageList(); + } + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public int getClustersCount() { + if (clustersBuilder_ == null) { + return clusters_.size(); + } else { + return clustersBuilder_.getCount(); + } + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public com.google.cloud.dataproc.v1beta2.Cluster getClusters(int index) { + if (clustersBuilder_ == null) { + return clusters_.get(index); + } else { + return clustersBuilder_.getMessage(index); + } + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder setClusters( + int index, com.google.cloud.dataproc.v1beta2.Cluster value) { + if (clustersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureClustersIsMutable(); + clusters_.set(index, value); + onChanged(); + } else { + clustersBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder setClusters( + int index, com.google.cloud.dataproc.v1beta2.Cluster.Builder builderForValue) { + if (clustersBuilder_ == null) { + ensureClustersIsMutable(); + clusters_.set(index, builderForValue.build()); + onChanged(); + } else { + clustersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder addClusters(com.google.cloud.dataproc.v1beta2.Cluster value) { + if (clustersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureClustersIsMutable(); + clusters_.add(value); + onChanged(); + } else { + clustersBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder addClusters( + int index, com.google.cloud.dataproc.v1beta2.Cluster value) { + if (clustersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureClustersIsMutable(); + clusters_.add(index, value); + onChanged(); + } else { + clustersBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder addClusters( + com.google.cloud.dataproc.v1beta2.Cluster.Builder builderForValue) { + if (clustersBuilder_ == null) { + ensureClustersIsMutable(); + clusters_.add(builderForValue.build()); + onChanged(); + } else { + clustersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder addClusters( + int index, com.google.cloud.dataproc.v1beta2.Cluster.Builder builderForValue) { + if (clustersBuilder_ == null) { + ensureClustersIsMutable(); + clusters_.add(index, builderForValue.build()); + onChanged(); + } else { + clustersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder addAllClusters( + java.lang.Iterable values) { + if (clustersBuilder_ == null) { + ensureClustersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, clusters_); + onChanged(); + } else { + clustersBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder clearClusters() { + if (clustersBuilder_ == null) { + clusters_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + clustersBuilder_.clear(); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public Builder removeClusters(int index) { + if (clustersBuilder_ == null) { + ensureClustersIsMutable(); + clusters_.remove(index); + onChanged(); + } else { + clustersBuilder_.remove(index); + } + return this; + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public com.google.cloud.dataproc.v1beta2.Cluster.Builder getClustersBuilder( + int index) { + return getClustersFieldBuilder().getBuilder(index); + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClustersOrBuilder( + int index) { + if (clustersBuilder_ == null) { + return clusters_.get(index); } else { + return clustersBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public java.util.List + getClustersOrBuilderList() { + if (clustersBuilder_ != null) { + return clustersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(clusters_); + } + } + /** + *
+     * Output only. The clusters in the project.
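+     * For illustration only, a sketch of populating this repeated field via
+     * a nested builder (the cluster name is a placeholder):
+     *
+     *   ListClustersResponse.Builder responseBuilder =
+     *       ListClustersResponse.newBuilder();
+     *   responseBuilder.addClustersBuilder()
+     *       .setClusterName("example-cluster");
+     *   ListClustersResponse response = responseBuilder.build();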
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public com.google.cloud.dataproc.v1beta2.Cluster.Builder addClustersBuilder() { + return getClustersFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance()); + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public com.google.cloud.dataproc.v1beta2.Cluster.Builder addClustersBuilder( + int index) { + return getClustersFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance()); + } + /** + *
+     * Output only. The clusters in the project.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1; + */ + public java.util.List + getClustersBuilderList() { + return getClustersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder> + getClustersFieldBuilder() { + if (clustersBuilder_ == null) { + clustersBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder>( + clusters_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + clusters_ = null; + } + return clustersBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + *
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListClustersRequest</code>.
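+     * For illustration only, a paging loop might look like this sketch,
+     * where `stub` is assumed to be a ClusterControllerGrpc blocking stub
+     * and `request` a previously built ListClustersRequest:
+     *
+     *   while (true) {
+     *     ListClustersResponse response = stub.listClusters(request);
+     *     for (Cluster cluster : response.getClustersList()) {
+     *       // handle each cluster
+     *     }
+     *     if (response.getNextPageToken().isEmpty()) {
+     *       break;
+     *     }
+     *     request = request.toBuilder()
+     *         .setPageToken(response.getNextPageToken())
+     *         .build();
+     *   }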
+     * 
+ * + * string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListClustersRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListClustersRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder setNextPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + nextPageToken_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListClustersRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder clearNextPageToken() { + + nextPageToken_ = getDefaultInstance().getNextPageToken(); + onChanged(); + return this; + } + /** + *
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListClustersRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder setNextPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + nextPageToken_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ListClustersResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListClustersResponse) + private static final com.google.cloud.dataproc.v1beta2.ListClustersResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ListClustersResponse(); + } + + public static com.google.cloud.dataproc.v1beta2.ListClustersResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListClustersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListClustersResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListClustersResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponseOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponseOrBuilder.java new file mode 100644 index 000000000000..4595d2cff6c2 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponseOrBuilder.java @@ -0,0 +1,75 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ListClustersResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ListClustersResponse) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The clusters in the project.
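+   * For illustration only (not part of the generated API), reading this
+   * field from an already-obtained response could look like the following
+   * sketch, where `response` is a hypothetical ListClustersResponse:
+   *
+   *   for (Cluster cluster : response.getClustersList()) {
+   *     System.out.println(cluster.getClusterName());
+   *   }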
+   * </pre>
+   *
+   * <code>repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1;</code>
+   */
+  java.util.List<com.google.cloud.dataproc.v1beta2.Cluster> 
+      getClustersList();
+  /**
+   * <pre>
+   * Output only. The clusters in the project.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1;</code>
+   */
+  com.google.cloud.dataproc.v1beta2.Cluster getClusters(int index);
+  /**
+   * <pre>
+   * Output only. The clusters in the project.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1;</code>
+   */
+  int getClustersCount();
+  /**
+   * <pre>
+   * Output only. The clusters in the project.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1;</code>
+   */
+  java.util.List<? extends com.google.cloud.dataproc.v1beta2.ClusterOrBuilder> 
+      getClustersOrBuilderList();
+  /**
+   * <pre>
+   * Output only. The clusters in the project.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.dataproc.v1beta2.Cluster clusters = 1;</code>
+   */
+  com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClustersOrBuilder(
+      int index);
+
+  /**
+   * <pre>
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent <code>ListClustersRequest</code>.
+   * </pre>
+   *
+   * <code>string next_page_token = 2;</code>
+   */
+  java.lang.String getNextPageToken();
+  /**
+   * <pre>
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent <code>ListClustersRequest</code>.
+   * </pre>
+   *
+   * <code>string next_page_token = 2;</code>
+   */
+  com.google.protobuf.ByteString
+      getNextPageTokenBytes();
+}
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequest.java
new file mode 100644
index 000000000000..cad31e59e6c0
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequest.java
@@ -0,0 +1,1619 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/jobs.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+/**
+ * <pre>
+ * A request to list jobs in a project.
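+ * For illustration only, such a request might be assembled with the
+ * generated builder as in this sketch (all field values are placeholders):
+ *
+ *   ListJobsRequest request = ListJobsRequest.newBuilder()
+ *       .setProjectId("my-project-id")
+ *       .setRegion("global")
+ *       .setPageSize(100)
+ *       .build();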
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListJobsRequest} + */ +public final class ListJobsRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ListJobsRequest) + ListJobsRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListJobsRequest.newBuilder() to construct. + private ListJobsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListJobsRequest() { + projectId_ = ""; + region_ = ""; + pageSize_ = 0; + pageToken_ = ""; + clusterName_ = ""; + jobStateMatcher_ = 0; + filter_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListJobsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 16: { + + pageSize_ = input.readInt32(); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + pageToken_ = s; + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 40: { + int rawValue = input.readEnum(); + + jobStateMatcher_ = rawValue; + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 58: { + java.lang.String s = input.readStringRequireUtf8(); + + filter_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListJobsRequest.class, com.google.cloud.dataproc.v1beta2.ListJobsRequest.Builder.class); + } + + /** + *
+   * A matcher that specifies categories of job states.
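+   * For illustration only, a sketch of requesting only jobs that are still
+   * pending, running, or awaiting cancellation (placeholder values):
+   *
+   *   ListJobsRequest request = ListJobsRequest.newBuilder()
+   *       .setProjectId("my-project-id")
+   *       .setRegion("global")
+   *       .setJobStateMatcher(JobStateMatcher.ACTIVE)
+   *       .build();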
+   * </pre>
+   *
+   * Protobuf enum {@code google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher}
+   */
+  public enum JobStateMatcher
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <pre>
+     * Match all jobs, regardless of state.
+     * </pre>
+     *
+     * <code>ALL = 0;</code>
+     */
+    ALL(0),
+    /**
+     * <pre>
+     * Only match jobs in non-terminal states: PENDING, RUNNING, or
+     * CANCEL_PENDING.
+     * </pre>
+     *
+     * <code>ACTIVE = 1;</code>
+     */
+    ACTIVE(1),
+    /**
+     * <pre>
+     * Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
+     * </pre>
+     *
+     * <code>NON_ACTIVE = 2;</code>
+     */
+    NON_ACTIVE(2),
+    UNRECOGNIZED(-1),
+    ;
+
+    /**
+     * <pre>
+     * Match all jobs, regardless of state.
+     * </pre>
+     *
+     * <code>ALL = 0;</code>
+     */
+    public static final int ALL_VALUE = 0;
+    /**
+     * <pre>
+     * Only match jobs in non-terminal states: PENDING, RUNNING, or
+     * CANCEL_PENDING.
+     * </pre>
+     *
+     * <code>ACTIVE = 1;</code>
+     */
+    public static final int ACTIVE_VALUE = 1;
+    /**
+     * <pre>
+     * Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
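+     * For illustration only: a raw wire value can be mapped back to a
+     * constant with forNumber, which returns null for unknown values:
+     *
+     *   JobStateMatcher matcher = JobStateMatcher.forNumber(2); // NON_ACTIVE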
+     * 
+ * + * NON_ACTIVE = 2; + */ + public static final int NON_ACTIVE_VALUE = 2; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static JobStateMatcher valueOf(int value) { + return forNumber(value); + } + + public static JobStateMatcher forNumber(int value) { + switch (value) { + case 0: return ALL; + case 1: return ACTIVE; + case 2: return NON_ACTIVE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + JobStateMatcher> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public JobStateMatcher findValueByNumber(int number) { + return JobStateMatcher.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ListJobsRequest.getDescriptor().getEnumTypes().get(0); + } + + private static final JobStateMatcher[] VALUES = values(); + + public static JobStateMatcher valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private JobStateMatcher(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher) + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 6; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 6; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 6; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_; + /** + *
+   * Optional. The number of results to return in each response.
+   * 
+ * + * int32 page_size = 2; + */ + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + private volatile java.lang.Object pageToken_; + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
+ * + * string page_token = 3; + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
+ * + * string page_token = 3; + */ + public com.google.protobuf.ByteString + getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 4; + private volatile java.lang.Object clusterName_; + /** + *
+   * Optional. If set, the returned jobs list includes only jobs that were
+   * submitted to the named cluster.
+   * 
+ * + * string cluster_name = 4; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Optional. If set, the returned jobs list includes only jobs that were
+   * submitted to the named cluster.
+   * 
+ * + * string cluster_name = 4; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int JOB_STATE_MATCHER_FIELD_NUMBER = 5; + private int jobStateMatcher_; + /** + *
+   * Optional. Specifies enumerated categories of jobs to list.
+   * (default = match ALL jobs).
+   * If `filter` is provided, `jobStateMatcher` will be ignored.
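+   * For illustration only: in the sketch below the `filter` takes effect
+   * and, per the rule above, the matcher set here is ignored:
+   *
+   *   ListJobsRequest request = ListJobsRequest.newBuilder()
+   *       .setFilter("status.state = ACTIVE")
+   *       .setJobStateMatcher(JobStateMatcher.NON_ACTIVE) // ignored
+   *       .build();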
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + public int getJobStateMatcherValue() { + return jobStateMatcher_; + } + /** + *
+   * Optional. Specifies enumerated categories of jobs to list.
+   * (default = match ALL jobs).
+   * If `filter` is provided, `jobStateMatcher` will be ignored.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + public com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher getJobStateMatcher() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher result = com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher.valueOf(jobStateMatcher_); + return result == null ? com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher.UNRECOGNIZED : result; + } + + public static final int FILTER_FIELD_NUMBER = 7; + private volatile java.lang.Object filter_; + /** + *
+   * Optional. A filter constraining the jobs to list. Filters are
+   * case-sensitive and have the following syntax:
+   * [field = value] AND [field [= value]] ...
+   * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+   * key. **value** can be `*` to match all values.
+   * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
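+   * For illustration only, the example filter above could be applied to a
+   * request as in this sketch (label keys and values are placeholders):
+   *
+   *   ListJobsRequest request = ListJobsRequest.newBuilder()
+   *       .setFilter("status.state = ACTIVE AND labels.env = staging"
+   *           + " AND labels.starred = *")
+   *       .build();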
+   * 
+ * + * string filter = 7; + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + /** + *
+   * Optional. A filter constraining the jobs to list. Filters are
+   * case-sensitive and have the following syntax:
+   * [field = value] AND [field [= value]] ...
+   * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+   * key. **value** can be `*` to match all values.
+   * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+   * 
+ * + * string filter = 7; + */ + public com.google.protobuf.ByteString + getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!getPageTokenBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pageToken_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, clusterName_); + } + if (jobStateMatcher_ != com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher.ALL.getNumber()) { + output.writeEnum(5, jobStateMatcher_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, region_); + } + if (!getFilterBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, filter_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, pageSize_); + } + if (!getPageTokenBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, pageToken_); + } + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, clusterName_); + } + if (jobStateMatcher_ != com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher.ALL.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, jobStateMatcher_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, region_); + } + if (!getFilterBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, filter_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ListJobsRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ListJobsRequest other = (com.google.cloud.dataproc.v1beta2.ListJobsRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && (getPageSize() + == other.getPageSize()); + result = result && getPageToken() + .equals(other.getPageToken()); + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && jobStateMatcher_ == 
other.jobStateMatcher_; + result = result && getFilter() + .equals(other.getFilter()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + hash = (37 * hash) + JOB_STATE_MATCHER_FIELD_NUMBER; + hash = (53 * hash) + jobStateMatcher_; + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ListJobsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to list jobs in a project.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListJobsRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ListJobsRequest) + com.google.cloud.dataproc.v1beta2.ListJobsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListJobsRequest.class, com.google.cloud.dataproc.v1beta2.ListJobsRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ListJobsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + pageSize_ = 0; + + pageToken_ = ""; + + clusterName_ = ""; + + jobStateMatcher_ = 0; + + filter_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ListJobsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsRequest build() { + com.google.cloud.dataproc.v1beta2.ListJobsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.ListJobsRequest result = new com.google.cloud.dataproc.v1beta2.ListJobsRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.pageSize_ = pageSize_; + result.pageToken_ = pageToken_; + result.clusterName_ = clusterName_; + result.jobStateMatcher_ = jobStateMatcher_; + result.filter_ = filter_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor 
field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ListJobsRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ListJobsRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ListJobsRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.ListJobsRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + onChanged(); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (other.jobStateMatcher_ != 0) { + setJobStateMatcherValue(other.getJobStateMatcherValue()); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ListJobsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ListJobsRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 6; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 6; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 6; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 6; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 6; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private int pageSize_ ; + /** + *
+     * Optional. The number of results to return in each response.
+     * 
+ * + * int32 page_size = 2; + */ + public int getPageSize() { + return pageSize_; + } + /** + *
+     * Optional. The number of results to return in each response.
+     * 
+ * + * int32 page_size = 2; + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The number of results to return in each response.
+     * 
+ * + * int32 page_size = 2; + */ + public Builder clearPageSize() { + + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public com.google.protobuf.ByteString + getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public Builder setPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + pageToken_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public Builder clearPageToken() { + + pageToken_ = getDefaultInstance().getPageToken(); + onChanged(); + return this; + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public Builder setPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + pageToken_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Optional. If set, the returned jobs list includes only jobs that were
+     * submitted to the named cluster.
+     * 
+ * + * string cluster_name = 4; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. If set, the returned jobs list includes only jobs that were
+     * submitted to the named cluster.
+     * 
+ * + * string cluster_name = 4; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. If set, the returned jobs list includes only jobs that were
+     * submitted to the named cluster.
+     * 
+ * + * string cluster_name = 4; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. If set, the returned jobs list includes only jobs that were
+     * submitted to the named cluster.
+     * 
+ * + * string cluster_name = 4; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Optional. If set, the returned jobs list includes only jobs that were
+     * submitted to the named cluster.
+     * 
+ * + * string cluster_name = 4; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private int jobStateMatcher_ = 0; + /** + *
+     * Optional. Specifies enumerated categories of jobs to list.
+     * (default = match ALL jobs).
+     * If `filter` is provided, `jobStateMatcher` will be ignored.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + public int getJobStateMatcherValue() { + return jobStateMatcher_; + } + /** + *
+     * Optional. Specifies enumerated categories of jobs to list.
+     * (default = match ALL jobs).
+     * If `filter` is provided, `jobStateMatcher` will be ignored.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + public Builder setJobStateMatcherValue(int value) { + jobStateMatcher_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Specifies enumerated categories of jobs to list.
+     * (default = match ALL jobs).
+     * If `filter` is provided, `jobStateMatcher` will be ignored.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + public com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher getJobStateMatcher() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher result = com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher.valueOf(jobStateMatcher_); + return result == null ? com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher.UNRECOGNIZED : result; + } + /** + *
+     * Optional. Specifies enumerated categories of jobs to list.
+     * (default = match ALL jobs).
+     * If `filter` is provided, `jobStateMatcher` will be ignored.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + public Builder setJobStateMatcher(com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher value) { + if (value == null) { + throw new NullPointerException(); + } + + jobStateMatcher_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Optional. Specifies enumerated categories of jobs to list.
+     * (default = match ALL jobs).
+     * If `filter` is provided, `jobStateMatcher` will be ignored.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + public Builder clearJobStateMatcher() { + + jobStateMatcher_ = 0; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + /** + *
+     * Optional. A filter constraining the jobs to list. Filters are
+     * case-sensitive and have the following syntax:
+     * [field = value] AND [field [= value]] ...
+     * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+     * key. **value** can be `*` to match all values.
+     * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 7; + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A filter constraining the jobs to list. Filters are
+     * case-sensitive and have the following syntax:
+     * [field = value] AND [field [= value]] ...
+     * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+     * key. **value** can be `*` to match all values.
+     * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 7; + */ + public com.google.protobuf.ByteString + getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A filter constraining the jobs to list. Filters are
+     * case-sensitive and have the following syntax:
+     * [field = value] AND [field [= value]] ...
+     * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+     * key. **value** can be `*` to match all values.
+     * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+     * 
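+     * <p>For example, a sketch that lists only active jobs labeled `env=staging`
+     * (the project ID and region values are placeholders):
+     * <pre>
+     * ListJobsRequest request = ListJobsRequest.newBuilder()
+     *     .setProjectId("my-project")    // placeholder project ID
+     *     .setRegion("us-central1")      // placeholder region
+     *     .setFilter("status.state = ACTIVE AND labels.env = staging")
+     *     .build();
+     * </pre>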
+ * + * string filter = 7; + */ + public Builder setFilter( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + filter_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A filter constraining the jobs to list. Filters are
+     * case-sensitive and have the following syntax:
+     * [field = value] AND [field [= value]] ...
+     * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+     * key. **value** can be `*` to match all values.
+     * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 7; + */ + public Builder clearFilter() { + + filter_ = getDefaultInstance().getFilter(); + onChanged(); + return this; + } + /** + *
+     * Optional. A filter constraining the jobs to list. Filters are
+     * case-sensitive and have the following syntax:
+     * [field = value] AND [field [= value]] ...
+     * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+     * key. **value** can be `*` to match all values.
+     * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+     * Only the logical `AND` operator is supported; space-separated items are
+     * treated as having an implicit `AND` operator.
+     * Example filter:
+     * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+     * 
+ * + * string filter = 7; + */ + public Builder setFilterBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + filter_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ListJobsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListJobsRequest) + private static final com.google.cloud.dataproc.v1beta2.ListJobsRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ListJobsRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.ListJobsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListJobsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListJobsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequestOrBuilder.java new file mode 100644 index 000000000000..04401948f441 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequestOrBuilder.java @@ -0,0 +1,153 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ListJobsRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ListJobsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 6; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 6; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Optional. The maximum number of results to return in each response.
+   * 
+ * + * int32 page_size = 2; + */ + int getPageSize(); + + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
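+   * <p>A request-side sketch (placeholder values): the first call omits the page
+   * token, and later calls echo the `next_page_token` of the previous response:
+   * <pre>
+   * ListJobsRequest firstPage = ListJobsRequest.newBuilder()
+   *     .setProjectId("my-project")   // placeholder project ID
+   *     .setRegion("us-central1")     // placeholder region
+   *     .setPageSize(100)             // upper bound on results per page
+   *     .build();
+   * // next page: firstPage.toBuilder().setPageToken(nextToken).build()
+   * </pre>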
+ * + * string page_token = 3; + */ + java.lang.String getPageToken(); + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
+ * + * string page_token = 3; + */ + com.google.protobuf.ByteString + getPageTokenBytes(); + + /** + *
+   * Optional. If set, the returned jobs list includes only jobs that were
+   * submitted to the named cluster.
+   * 
+ * + * string cluster_name = 4; + */ + java.lang.String getClusterName(); + /** + *
+   * Optional. If set, the returned jobs list includes only jobs that were
+   * submitted to the named cluster.
+   * 
+ * + * string cluster_name = 4; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Optional. Specifies enumerated categories of jobs to list.
+   * (default = match ALL jobs).
+   * If `filter` is provided, `jobStateMatcher` will be ignored.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + int getJobStateMatcherValue(); + /** + *
+   * Optional. Specifies enumerated categories of jobs to list.
+   * (default = match ALL jobs).
+   * If `filter` is provided, `jobStateMatcher` will be ignored.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher job_state_matcher = 5; + */ + com.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher getJobStateMatcher(); + + /** + *
+   * Optional. A filter constraining the jobs to list. Filters are
+   * case-sensitive and have the following syntax:
+   * [field = value] AND [field [= value]] ...
+   * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+   * key. **value** can be `*` to match all values.
+   * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+   * 
+ * + * string filter = 7; + */ + java.lang.String getFilter(); + /** + *
+   * Optional. A filter constraining the jobs to list. Filters are
+   * case-sensitive and have the following syntax:
+   * [field = value] AND [field [= value]] ...
+   * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+   * key. **value** can be `*` to match all values.
+   * `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+   * Only the logical `AND` operator is supported; space-separated items are
+   * treated as having an implicit `AND` operator.
+   * Example filter:
+   * status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+   * 
+ * + * string filter = 7; + */ + com.google.protobuf.ByteString + getFilterBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponse.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponse.java new file mode 100644 index 000000000000..b058744758be --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponse.java @@ -0,0 +1,1031 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A list of jobs in a project.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListJobsResponse} + */ +public final class ListJobsResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ListJobsResponse) + ListJobsResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListJobsResponse.newBuilder() to construct. + private ListJobsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListJobsResponse() { + jobs_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListJobsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + jobs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + jobs_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.Job.parser(), extensionRegistry)); + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + nextPageToken_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + jobs_ = java.util.Collections.unmodifiableList(jobs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListJobsResponse.class, com.google.cloud.dataproc.v1beta2.ListJobsResponse.Builder.class); + } + + private int bitField0_; + public static final int JOBS_FIELD_NUMBER = 1; + private java.util.List jobs_; + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public java.util.List getJobsList() { + return jobs_; + } + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public java.util.List + getJobsOrBuilderList() { + return jobs_; + } + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public int getJobsCount() { + return jobs_.size(); + } + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public com.google.cloud.dataproc.v1beta2.Job getJobs(int index) { + return jobs_.get(index); + } + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobsOrBuilder( + int index) { + return jobs_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + private volatile java.lang.Object nextPageToken_; + /** + *
+   * Optional. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent <code>ListJobsRequest</code>.
+   * 
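+   * <p>For example, a minimal pagination sketch; `listJobs` stands in for whatever
+   * client or stub actually issues the RPC:
+   * <pre>
+   * ListJobsRequest request = ...;  // initial request
+   * while (true) {
+   *   ListJobsResponse response = listJobs(request);  // hypothetical RPC call
+   *   for (Job job : response.getJobsList()) {
+   *     // process each job
+   *   }
+   *   String token = response.getNextPageToken();
+   *   if (token.isEmpty()) {
+   *     break;  // no further results
+   *   }
+   *   request = request.toBuilder().setPageToken(token).build();
+   * }
+   * </pre>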
+ * + * string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + /** + *
+   * Optional. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent <code>ListJobsRequest</code>.
+   * 
+ * + * string next_page_token = 2; + */ + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < jobs_.size(); i++) { + output.writeMessage(1, jobs_.get(i)); + } + if (!getNextPageTokenBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < jobs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, jobs_.get(i)); + } + if (!getNextPageTokenBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ListJobsResponse)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ListJobsResponse other = (com.google.cloud.dataproc.v1beta2.ListJobsResponse) obj; + + boolean result = true; + result = result && getJobsList() + .equals(other.getJobsList()); + result = result && getNextPageToken() + .equals(other.getNextPageToken()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getJobsCount() > 0) { + hash = (37 * hash) + JOBS_FIELD_NUMBER; + hash = (53 * hash) + getJobsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ListJobsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A list of jobs in a project.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListJobsResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ListJobsResponse) + com.google.cloud.dataproc.v1beta2.ListJobsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListJobsResponse.class, com.google.cloud.dataproc.v1beta2.ListJobsResponse.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ListJobsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getJobsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (jobsBuilder_ == null) { + jobs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + jobsBuilder_.clear(); + } + nextPageToken_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_ListJobsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsResponse getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ListJobsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsResponse build() { + com.google.cloud.dataproc.v1beta2.ListJobsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsResponse buildPartial() { + com.google.cloud.dataproc.v1beta2.ListJobsResponse result = new com.google.cloud.dataproc.v1beta2.ListJobsResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (jobsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + jobs_ = java.util.Collections.unmodifiableList(jobs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.jobs_ = jobs_; + } else { + result.jobs_ = jobsBuilder_.build(); + } + result.nextPageToken_ = nextPageToken_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + 
public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ListJobsResponse) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ListJobsResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ListJobsResponse other) { + if (other == com.google.cloud.dataproc.v1beta2.ListJobsResponse.getDefaultInstance()) return this; + if (jobsBuilder_ == null) { + if (!other.jobs_.isEmpty()) { + if (jobs_.isEmpty()) { + jobs_ = other.jobs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureJobsIsMutable(); + jobs_.addAll(other.jobs_); + } + onChanged(); + } + } else { + if (!other.jobs_.isEmpty()) { + if (jobsBuilder_.isEmpty()) { + jobsBuilder_.dispose(); + jobsBuilder_ = null; + jobs_ = other.jobs_; + bitField0_ = (bitField0_ & ~0x00000001); + jobsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getJobsFieldBuilder() : null; + } else { + jobsBuilder_.addAllMessages(other.jobs_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ListJobsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ListJobsResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List jobs_ = + java.util.Collections.emptyList(); + private void ensureJobsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + jobs_ = new java.util.ArrayList(jobs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder> jobsBuilder_; + + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public java.util.List getJobsList() { + if (jobsBuilder_ == null) { + return java.util.Collections.unmodifiableList(jobs_); + } else { + return jobsBuilder_.getMessageList(); + } + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public int getJobsCount() { + if (jobsBuilder_ == null) { + return jobs_.size(); + } else { + return jobsBuilder_.getCount(); + } + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public com.google.cloud.dataproc.v1beta2.Job getJobs(int index) { + if (jobsBuilder_ == null) { + return jobs_.get(index); + } else { + return jobsBuilder_.getMessage(index); + } + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder setJobs( + int index, com.google.cloud.dataproc.v1beta2.Job value) { + if (jobsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureJobsIsMutable(); + jobs_.set(index, value); + onChanged(); + } else { + jobsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder setJobs( + int index, com.google.cloud.dataproc.v1beta2.Job.Builder builderForValue) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.set(index, builderForValue.build()); + onChanged(); + } else { + jobsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder addJobs(com.google.cloud.dataproc.v1beta2.Job value) { + if (jobsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureJobsIsMutable(); + jobs_.add(value); + onChanged(); + } else { + jobsBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder addJobs( + int index, com.google.cloud.dataproc.v1beta2.Job value) { + if (jobsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureJobsIsMutable(); + jobs_.add(index, value); + onChanged(); + } else { + jobsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder addJobs( + com.google.cloud.dataproc.v1beta2.Job.Builder builderForValue) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.add(builderForValue.build()); + onChanged(); + } else { + jobsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder addJobs( + int index, com.google.cloud.dataproc.v1beta2.Job.Builder builderForValue) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.add(index, builderForValue.build()); + onChanged(); + } else { + jobsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder addAllJobs( + java.lang.Iterable values) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jobs_); + onChanged(); + } else { + jobsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder clearJobs() { + if (jobsBuilder_ == null) { + jobs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + jobsBuilder_.clear(); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public Builder removeJobs(int index) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.remove(index); + onChanged(); + } else { + jobsBuilder_.remove(index); + } + return this; + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public com.google.cloud.dataproc.v1beta2.Job.Builder getJobsBuilder( + int index) { + return getJobsFieldBuilder().getBuilder(index); + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobsOrBuilder( + int index) { + if (jobsBuilder_ == null) { + return jobs_.get(index); } else { + return jobsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public java.util.List + getJobsOrBuilderList() { + if (jobsBuilder_ != null) { + return jobsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(jobs_); + } + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public com.google.cloud.dataproc.v1beta2.Job.Builder addJobsBuilder() { + return getJobsFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance()); + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public com.google.cloud.dataproc.v1beta2.Job.Builder addJobsBuilder( + int index) { + return getJobsFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance()); + } + /** + *
+     * Output only. Jobs list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + public java.util.List + getJobsBuilderList() { + return getJobsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder> + getJobsFieldBuilder() { + if (jobsBuilder_ == null) { + jobsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder>( + jobs_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + jobs_ = null; + } + return jobsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + *
+     * Optional. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListJobsRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListJobsRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListJobsRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder setNextPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + nextPageToken_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListJobsRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder clearNextPageToken() { + + nextPageToken_ = getDefaultInstance().getNextPageToken(); + onChanged(); + return this; + } + /** + *
+     * Optional. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * `page_token` in a subsequent <code>ListJobsRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder setNextPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + nextPageToken_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ListJobsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListJobsResponse) + private static final com.google.cloud.dataproc.v1beta2.ListJobsResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ListJobsResponse(); + } + + public static com.google.cloud.dataproc.v1beta2.ListJobsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListJobsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListJobsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListJobsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponseOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponseOrBuilder.java new file mode 100644 index 000000000000..8cdc4dd286cb --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponseOrBuilder.java @@ -0,0 +1,75 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ListJobsResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ListJobsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + java.util.List + getJobsList(); + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + com.google.cloud.dataproc.v1beta2.Job getJobs(int index); + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + int getJobsCount(); + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + java.util.List + getJobsOrBuilderList(); + /** + *
+   * Output only. Jobs list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.Job jobs = 1; + */ + com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobsOrBuilder( + int index); + + /** + *
+   * Optional. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent <code>ListJobsRequest</code>.
+   * 
+ * + * string next_page_token = 2; + */ + java.lang.String getNextPageToken(); + /** + *
+   * Optional. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * `page_token` in a subsequent <code>ListJobsRequest</code>.
+   * 
+ * + * string next_page_token = 2; + */ + com.google.protobuf.ByteString + getNextPageTokenBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequest.java new file mode 100644 index 000000000000..8ed9ec9a2e20 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequest.java @@ -0,0 +1,827 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to list workflow templates in a project.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest} + */ +public final class ListWorkflowTemplatesRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) + ListWorkflowTemplatesRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListWorkflowTemplatesRequest.newBuilder() to construct. + private ListWorkflowTemplatesRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListWorkflowTemplatesRequest() { + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListWorkflowTemplatesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 16: { + + pageSize_ = input.readInt32(); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + pageToken_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.class, com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
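+   * <p>For example (the project ID and region are placeholders):
+   * <pre>
+   * String parent = String.format(
+   *     "projects/%s/regions/%s", "my-project", "us-central1");
+   * ListWorkflowTemplatesRequest request =
+   *     ListWorkflowTemplatesRequest.newBuilder()
+   *         .setParent(parent)
+   *         .build();
+   * </pre>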
+ * + * string parent = 1; + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + public com.google.protobuf.ByteString + getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_; + /** + *
+   * Optional. The maximum number of results to return in each response.
+   * 
+ * + * int32 page_size = 2; + */ + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + private volatile java.lang.Object pageToken_; + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
+ * + * string page_token = 3; + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
+ * + * string page_token = 3; + */ + public com.google.protobuf.ByteString + getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!getPageTokenBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pageToken_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, pageSize_); + } + if (!getPageTokenBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, pageToken_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest other = (com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) obj; + + boolean result = true; + result = result && getParent() + .equals(other.getParent()); + result = result && (getPageSize() + == other.getPageSize()); + result = result && getPageToken() + .equals(other.getPageToken()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to list workflow templates in a project.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.class, com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + pageSize_ = 0; + + pageToken_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest build() { + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest result = new com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest(this); + result.parent_ = parent_; + result.pageSize_ = pageSize_; + result.pageToken_ = pageToken_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public com.google.protobuf.ByteString + getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder setParent( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + *
+     * Required. The "resource name" of the region, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}`
+     * 
+ * + * string parent = 1; + */ + public Builder setParentBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private int pageSize_ ; + /** + *
+     * Optional. The maximum number of results to return in each response.
+     * 
+ * + * int32 page_size = 2; + */ + public int getPageSize() { + return pageSize_; + } + /** + *
+     * Optional. The maximum number of results to return in each response.
+     * 
+ * + * int32 page_size = 2; + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The maximum number of results to return in each response.
+     * 
+ * + * int32 page_size = 2; + */ + public Builder clearPageSize() { + + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public com.google.protobuf.ByteString + getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public Builder setPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + pageToken_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public Builder clearPageToken() { + + pageToken_ = getDefaultInstance().getPageToken(); + onChanged(); + return this; + } + /** + *
+     * Optional. The page token, returned by a previous call, to request the
+     * next page of results.
+     * 
+ * + * string page_token = 3; + */ + public Builder setPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + pageToken_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) + private static final com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListWorkflowTemplatesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListWorkflowTemplatesRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequestOrBuilder.java new file mode 100644 index 000000000000..46e6a65339e4 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequestOrBuilder.java @@ -0,0 +1,60 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ListWorkflowTemplatesRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + java.lang.String getParent(); + /** + *
+   * Required. The "resource name" of the region, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}`
+   * 
+ * + * string parent = 1; + */ + com.google.protobuf.ByteString + getParentBytes(); + + /** + *
+   * Optional. The maximum number of results to return in each response.
+   * 
+ * + * int32 page_size = 2; + */ + int getPageSize(); + + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
+ * + * string page_token = 3; + */ + java.lang.String getPageToken(); + /** + *
+   * Optional. The page token, returned by a previous call, to request the
+   * next page of results.
+   * 
+ * + * string page_token = 3; + */ + com.google.protobuf.ByteString + getPageTokenBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java new file mode 100644 index 000000000000..affe7cad47b7 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java @@ -0,0 +1,1031 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A response to a request to list workflow templates in a project.
+ * 
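+ *
+ * A hedged pagination sketch; {@code call} stands in for whatever RPC stub
+ * actually serves the request and is not part of this generated class:
+ *
+ *   java.util.List<WorkflowTemplate> all = new java.util.ArrayList<>();
+ *   java.lang.String pageToken = "";
+ *   do {
+ *     ListWorkflowTemplatesResponse response = call.apply(
+ *         ListWorkflowTemplatesRequest.newBuilder()
+ *             .setParent(parent)
+ *             .setPageToken(pageToken)
+ *             .build());
+ *     all.addAll(response.getTemplatesList());
+ *     pageToken = response.getNextPageToken();
+ *   } while (!pageToken.isEmpty());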
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse} + */ +public final class ListWorkflowTemplatesResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) + ListWorkflowTemplatesResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListWorkflowTemplatesResponse.newBuilder() to construct. + private ListWorkflowTemplatesResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListWorkflowTemplatesResponse() { + templates_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListWorkflowTemplatesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + templates_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + templates_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.WorkflowTemplate.parser(), extensionRegistry)); + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + nextPageToken_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + templates_ = java.util.Collections.unmodifiableList(templates_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.class, com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.Builder.class); + } + + private int bitField0_; + public static final int TEMPLATES_FIELD_NUMBER = 1; + private java.util.List templates_; + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public java.util.List getTemplatesList() { + return templates_; + } + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public java.util.List + getTemplatesOrBuilderList() { + return templates_; + } + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public int getTemplatesCount() { + return templates_.size(); + } + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplates(int index) { + return templates_.get(index); + } + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplatesOrBuilder( + int index) { + return templates_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + private volatile java.lang.Object nextPageToken_; + /** + *
+   * Output only. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+   * 
+ * + * string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + /** + *
+   * Output only. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+   * 
+ * + * string next_page_token = 2; + */ + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < templates_.size(); i++) { + output.writeMessage(1, templates_.get(i)); + } + if (!getNextPageTokenBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < templates_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, templates_.get(i)); + } + if (!getNextPageTokenBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse other = (com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) obj; + + boolean result = true; + result = result && getTemplatesList() + .equals(other.getTemplatesList()); + result = result && getNextPageToken() + .equals(other.getNextPageToken()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getTemplatesCount() > 0) { + hash = (37 * hash) + TEMPLATES_FIELD_NUMBER; + hash = (53 * hash) + getTemplatesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A response to a request to list workflow templates in a project.
+   * 
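+   *
+   * A minimal construction sketch (illustrative; responses normally come
+   * from the service, but hand-building one can be useful in tests):
+   *
+   *   ListWorkflowTemplatesResponse response =
+   *       ListWorkflowTemplatesResponse.newBuilder()
+   *           .addTemplates(WorkflowTemplate.getDefaultInstance())
+   *           .setNextPageToken("")
+   *           .build();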
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.class, com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getTemplatesFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (templatesBuilder_ == null) { + templates_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + templatesBuilder_.clear(); + } + nextPageToken_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse build() { + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse buildPartial() { + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse result = new com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (templatesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + templates_ = java.util.Collections.unmodifiableList(templates_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.templates_ = templates_; + } else { + result.templates_ = templatesBuilder_.build(); + } + result.nextPageToken_ = nextPageToken_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse other) { + if (other == com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.getDefaultInstance()) return this; + if (templatesBuilder_ == null) { + if (!other.templates_.isEmpty()) { + if (templates_.isEmpty()) { + templates_ = other.templates_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTemplatesIsMutable(); + templates_.addAll(other.templates_); + } + onChanged(); + } + } else { + if (!other.templates_.isEmpty()) { + if (templatesBuilder_.isEmpty()) { + templatesBuilder_.dispose(); + templatesBuilder_ = null; + templates_ = other.templates_; + bitField0_ = (bitField0_ & ~0x00000001); + templatesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getTemplatesFieldBuilder() : null; + } else { + templatesBuilder_.addAllMessages(other.templates_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List templates_ = + java.util.Collections.emptyList(); + private void ensureTemplatesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + templates_ = new java.util.ArrayList(templates_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> templatesBuilder_; + + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public java.util.List getTemplatesList() { + if (templatesBuilder_ == null) { + return java.util.Collections.unmodifiableList(templates_); + } else { + return templatesBuilder_.getMessageList(); + } + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public int getTemplatesCount() { + if (templatesBuilder_ == null) { + return templates_.size(); + } else { + return templatesBuilder_.getCount(); + } + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplates(int index) { + if (templatesBuilder_ == null) { + return templates_.get(index); + } else { + return templatesBuilder_.getMessage(index); + } + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder setTemplates( + int index, com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templatesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTemplatesIsMutable(); + templates_.set(index, value); + onChanged(); + } else { + templatesBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder setTemplates( + int index, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder builderForValue) { + if (templatesBuilder_ == null) { + ensureTemplatesIsMutable(); + templates_.set(index, builderForValue.build()); + onChanged(); + } else { + templatesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder addTemplates(com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templatesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTemplatesIsMutable(); + templates_.add(value); + onChanged(); + } else { + templatesBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder addTemplates( + int index, com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templatesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTemplatesIsMutable(); + templates_.add(index, value); + onChanged(); + } else { + templatesBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder addTemplates( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder builderForValue) { + if (templatesBuilder_ == null) { + ensureTemplatesIsMutable(); + templates_.add(builderForValue.build()); + onChanged(); + } else { + templatesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder addTemplates( + int index, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder builderForValue) { + if (templatesBuilder_ == null) { + ensureTemplatesIsMutable(); + templates_.add(index, builderForValue.build()); + onChanged(); + } else { + templatesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder addAllTemplates( + java.lang.Iterable values) { + if (templatesBuilder_ == null) { + ensureTemplatesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, templates_); + onChanged(); + } else { + templatesBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder clearTemplates() { + if (templatesBuilder_ == null) { + templates_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + templatesBuilder_.clear(); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public Builder removeTemplates(int index) { + if (templatesBuilder_ == null) { + ensureTemplatesIsMutable(); + templates_.remove(index); + onChanged(); + } else { + templatesBuilder_.remove(index); + } + return this; + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder getTemplatesBuilder( + int index) { + return getTemplatesFieldBuilder().getBuilder(index); + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplatesOrBuilder( + int index) { + if (templatesBuilder_ == null) { + return templates_.get(index); } else { + return templatesBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public java.util.List + getTemplatesOrBuilderList() { + if (templatesBuilder_ != null) { + return templatesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(templates_); + } + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder addTemplatesBuilder() { + return getTemplatesFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance()); + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder addTemplatesBuilder( + int index) { + return getTemplatesFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance()); + } + /** + *
+     * Output only. WorkflowTemplates list.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + public java.util.List + getTemplatesBuilderList() { + return getTemplatesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> + getTemplatesFieldBuilder() { + if (templatesBuilder_ == null) { + templatesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder>( + templates_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + templates_ = null; + } + return templatesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + *
+     * Output only. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder setNextPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + nextPageToken_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder clearNextPageToken() { + + nextPageToken_ = getDefaultInstance().getNextPageToken(); + onChanged(); + return this; + } + /** + *
+     * Output only. This token is included in the response if there are more results
+     * to fetch. To fetch additional results, provide this value as the
+     * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+     * 
+ * + * string next_page_token = 2; + */ + public Builder setNextPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + nextPageToken_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) + private static final com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse(); + } + + public static com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListWorkflowTemplatesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListWorkflowTemplatesResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java new file mode 100644 index 000000000000..8b110e1d1c42 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java @@ -0,0 +1,75 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ListWorkflowTemplatesResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + java.util.List + getTemplatesList(); + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplates(int index); + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + int getTemplatesCount(); + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + java.util.List + getTemplatesOrBuilderList(); + /** + *
+   * Output only. WorkflowTemplates list.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowTemplate templates = 1; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplatesOrBuilder( + int index); + + /** + *
+   * Output only. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+   * 
+ * + * string next_page_token = 2; + */ + java.lang.String getNextPageToken(); + /** + *
+   * Output only. This token is included in the response if there are more results
+   * to fetch. To fetch additional results, provide this value as the
+   * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
+   * 
+ * + * string next_page_token = 2; + */ + com.google.protobuf.ByteString + getNextPageTokenBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfig.java new file mode 100644 index 000000000000..490da16d23ae --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfig.java @@ -0,0 +1,1203 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * The runtime logging config of the job.
+ * 
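+ *
+ * A minimal construction sketch (illustrative; assumes the standard
+ * generated map-field mutators on the Builder):
+ *
+ *   LoggingConfig config = LoggingConfig.newBuilder()
+ *       .putDriverLogLevels("root", LoggingConfig.Level.INFO)
+ *       .putDriverLogLevels("org.apache", LoggingConfig.Level.DEBUG)
+ *       .build();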
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.LoggingConfig} + */ +public final class LoggingConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.LoggingConfig) + LoggingConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use LoggingConfig.newBuilder() to construct. + private LoggingConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private LoggingConfig() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LoggingConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + driverLogLevels_ = com.google.protobuf.MapField.newMapField( + DriverLogLevelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry + driverLogLevels__ = input.readMessage( + DriverLogLevelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + driverLogLevels_.getMutableMap().put( + driverLogLevels__.getKey(), driverLogLevels__.getValue()); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetDriverLogLevels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.LoggingConfig.class, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder.class); + } + + /** + *
+   * The Log4j level for job execution. When running an
+   * [Apache Hive](http://hive.apache.org/) job, Cloud
+   * Dataproc configures the Hive client to an equivalent verbosity level.
+   * 
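+   *
+   * A small usage sketch (illustrative only):
+   *
+   *   LoggingConfig.Level level = LoggingConfig.Level.forNumber(4);
+   *   // level == LoggingConfig.Level.INFO; forNumber(int) returns null
+   *   // for numbers that match no declared level.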
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.LoggingConfig.Level} + */ + public enum Level + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * Level is unspecified. Use default level for log4j.
+     * 
+ * + * LEVEL_UNSPECIFIED = 0; + */ + LEVEL_UNSPECIFIED(0), + /** + *
+     * Use ALL level for log4j.
+     * 
+ * + * ALL = 1; + */ + ALL(1), + /** + *
+     * Use TRACE level for log4j.
+     * 
+ * + * TRACE = 2; + */ + TRACE(2), + /** + *
+     * Use DEBUG level for log4j.
+     * 
+ * + * DEBUG = 3; + */ + DEBUG(3), + /** + *
+     * Use INFO level for log4j.
+     * 
+ * + * INFO = 4; + */ + INFO(4), + /** + *
+     * Use WARN level for log4j.
+     * 
+ * + * WARN = 5; + */ + WARN(5), + /** + *
+     * Use ERROR level for log4j.
+     * 
+ * + * ERROR = 6; + */ + ERROR(6), + /** + *
+     * Use FATAL level for log4j.
+     * 
+ * + * FATAL = 7; + */ + FATAL(7), + /** + *
+     * Turn off log4j.
+     * 
+ * + * OFF = 8; + */ + OFF(8), + UNRECOGNIZED(-1), + ; + + /** + *
+     * Level is unspecified. Use default level for log4j.
+     * 
+ * + * LEVEL_UNSPECIFIED = 0; + */ + public static final int LEVEL_UNSPECIFIED_VALUE = 0; + /** + *
+     * Use ALL level for log4j.
+     * 
+ * + * ALL = 1; + */ + public static final int ALL_VALUE = 1; + /** + *
+     * Use TRACE level for log4j.
+     * 
+ * + * TRACE = 2; + */ + public static final int TRACE_VALUE = 2; + /** + *
+     * Use DEBUG level for log4j.
+     * 
+ * + * DEBUG = 3; + */ + public static final int DEBUG_VALUE = 3; + /** + *
+     * Use INFO level for log4j.
+     * 
+ * + * INFO = 4; + */ + public static final int INFO_VALUE = 4; + /** + *
+     * Use WARN level for log4j.
+     * 
+ * + * WARN = 5; + */ + public static final int WARN_VALUE = 5; + /** + *
+     * Use ERROR level for log4j.
+     * 
+ * + * ERROR = 6; + */ + public static final int ERROR_VALUE = 6; + /** + *
+     * Use FATAL level for log4j.
+     * 
+ * + * FATAL = 7; + */ + public static final int FATAL_VALUE = 7; + /** + *
+     * Turn off log4j.
+     * 
+ * + * OFF = 8; + */ + public static final int OFF_VALUE = 8; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Level valueOf(int value) { + return forNumber(value); + } + + public static Level forNumber(int value) { + switch (value) { + case 0: return LEVEL_UNSPECIFIED; + case 1: return ALL; + case 2: return TRACE; + case 3: return DEBUG; + case 4: return INFO; + case 5: return WARN; + case 6: return ERROR; + case 7: return FATAL; + case 8: return OFF; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + Level> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Level findValueByNumber(int number) { + return Level.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.LoggingConfig.getDescriptor().getEnumTypes().get(0); + } + + private static final Level[] VALUES = values(); + + public static Level valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Level(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.LoggingConfig.Level) + } + + public static final int DRIVER_LOG_LEVELS_FIELD_NUMBER = 2; + private static final class DriverLogLevelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.Integer> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_DriverLogLevelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.ENUM, + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level.LEVEL_UNSPECIFIED.getNumber()); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.Integer> driverLogLevels_; + private com.google.protobuf.MapField + internalGetDriverLogLevels() { + if (driverLogLevels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + DriverLogLevelsDefaultEntryHolder.defaultEntry); + } + return driverLogLevels_; + } + private static final + com.google.protobuf.Internal.MapAdapter.Converter< + java.lang.Integer, com.google.cloud.dataproc.v1beta2.LoggingConfig.Level> driverLogLevelsValueConverter = + com.google.protobuf.Internal.MapAdapter.newEnumConverter( + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level.internalGetValueMap(), + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level.UNRECOGNIZED); + private static final java.util.Map + internalGetAdaptedDriverLogLevelsMap( + 
java.util.Map map) { + return new com.google.protobuf.Internal.MapAdapter< + java.lang.String, com.google.cloud.dataproc.v1beta2.LoggingConfig.Level, java.lang.Integer>( + map, driverLogLevelsValueConverter); + } + + public int getDriverLogLevelsCount() { + return internalGetDriverLogLevels().getMap().size(); + } + /** + *
+   * The per-package log levels for the driver. This may include the
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public boolean containsDriverLogLevels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetDriverLogLevels().getMap().containsKey(key); + } + /** + * Use {@link #getDriverLogLevelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map + getDriverLogLevels() { + return getDriverLogLevelsMap(); + } + /** + *
+   * The per-package log levels for the driver. This may include the
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public java.util.Map + getDriverLogLevelsMap() { + return internalGetAdaptedDriverLogLevelsMap( + internalGetDriverLogLevels().getMap());} + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
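+   * A reading sketch ("config" is a hypothetical LoggingConfig instance;
+   * the accessor below returns the supplied default when the key is absent):
+   *   LoggingConfig.Level level =
+   *       config.getDriverLogLevelsOrDefault("root", LoggingConfig.Level.INFO);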
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Level getDriverLogLevelsOrDefault( + java.lang.String key, + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + return map.containsKey(key) + ? driverLogLevelsValueConverter.doForward(map.get(key)) + : defaultValue; + } + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Level getDriverLogLevelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return driverLogLevelsValueConverter.doForward(map.get(key)); + } + /** + * Use {@link #getDriverLogLevelsValueMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map + getDriverLogLevelsValue() { + return getDriverLogLevelsValueMap(); + } + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public java.util.Map + getDriverLogLevelsValueMap() { + return internalGetDriverLogLevels().getMap(); + } + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public int getDriverLogLevelsValueOrDefault( + java.lang.String key, + int defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public int getDriverLogLevelsValueOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetDriverLogLevels(), + DriverLogLevelsDefaultEntryHolder.defaultEntry, + 2); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (java.util.Map.Entry entry + : internalGetDriverLogLevels().getMap().entrySet()) { + com.google.protobuf.MapEntry + driverLogLevels__ = DriverLogLevelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, driverLogLevels__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.LoggingConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.LoggingConfig other = (com.google.cloud.dataproc.v1beta2.LoggingConfig) obj; + + boolean result = true; + result = result && internalGetDriverLogLevels().equals( + other.internalGetDriverLogLevels()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (!internalGetDriverLogLevels().getMap().isEmpty()) { + hash = (37 * hash) + DRIVER_LOG_LEVELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetDriverLogLevels().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException 
{ + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.LoggingConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.LoggingConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The runtime logging config of the job.
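+   * A minimal construction sketch, assuming only the builder methods
+   * generated in this file:
+   *   LoggingConfig config = LoggingConfig.newBuilder()
+   *       .putDriverLogLevels("root", LoggingConfig.Level.INFO)
+   *       .putDriverLogLevels("org.apache", LoggingConfig.Level.DEBUG)
+   *       .build();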
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.LoggingConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.LoggingConfig) + com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetDriverLogLevels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 2: + return internalGetMutableDriverLogLevels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.LoggingConfig.class, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.LoggingConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + internalGetMutableDriverLogLevels().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_LoggingConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LoggingConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LoggingConfig build() { + com.google.cloud.dataproc.v1beta2.LoggingConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LoggingConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.LoggingConfig result = new com.google.cloud.dataproc.v1beta2.LoggingConfig(this); + int from_bitField0_ = bitField0_; + result.driverLogLevels_ = internalGetDriverLogLevels(); + result.driverLogLevels_.makeImmutable(); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor 
field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.LoggingConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.LoggingConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.LoggingConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance()) return this; + internalGetMutableDriverLogLevels().mergeFrom( + other.internalGetDriverLogLevels()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.LoggingConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.LoggingConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private com.google.protobuf.MapField< + java.lang.String, java.lang.Integer> driverLogLevels_; + private com.google.protobuf.MapField + internalGetDriverLogLevels() { + if (driverLogLevels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + DriverLogLevelsDefaultEntryHolder.defaultEntry); + } + return driverLogLevels_; + } + private com.google.protobuf.MapField + internalGetMutableDriverLogLevels() { + onChanged();; + if (driverLogLevels_ == null) { + driverLogLevels_ = com.google.protobuf.MapField.newMapField( + DriverLogLevelsDefaultEntryHolder.defaultEntry); + } + if (!driverLogLevels_.isMutable()) { + driverLogLevels_ = driverLogLevels_.copy(); + } + return driverLogLevels_; + } + + public int getDriverLogLevelsCount() { + return internalGetDriverLogLevels().getMap().size(); + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public boolean containsDriverLogLevels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetDriverLogLevels().getMap().containsKey(key); + } + /** + * Use {@link #getDriverLogLevelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map + getDriverLogLevels() { + return getDriverLogLevelsMap(); + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public java.util.Map + getDriverLogLevelsMap() { + return internalGetAdaptedDriverLogLevelsMap( + internalGetDriverLogLevels().getMap());} + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Level getDriverLogLevelsOrDefault( + java.lang.String key, + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + return map.containsKey(key) + ? driverLogLevelsValueConverter.doForward(map.get(key)) + : defaultValue; + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Level getDriverLogLevelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return driverLogLevelsValueConverter.doForward(map.get(key)); + } + /** + * Use {@link #getDriverLogLevelsValueMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map + getDriverLogLevelsValue() { + return getDriverLogLevelsValueMap(); + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public java.util.Map + getDriverLogLevelsValueMap() { + return internalGetDriverLogLevels().getMap(); + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public int getDriverLogLevelsValueOrDefault( + java.lang.String key, + int defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public int getDriverLogLevelsValueOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetDriverLogLevels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearDriverLogLevels() { + internalGetMutableDriverLogLevels().getMutableMap() + .clear(); + return this; + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + public Builder removeDriverLogLevels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableDriverLogLevels().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableDriverLogLevels() { + return internalGetAdaptedDriverLogLevelsMap( + internalGetMutableDriverLogLevels().getMutableMap()); + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
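+     * For example (a hedged sketch; "builder" is a hypothetical
+     * LoggingConfig.Builder instance):
+     *   builder.putDriverLogLevels("com.google", LoggingConfig.Level.FATAL);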
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + public Builder putDriverLogLevels( + java.lang.String key, + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableDriverLogLevels().getMutableMap() + .put(key, driverLogLevelsValueConverter.doBackward(value)); + return this; + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + public Builder putAllDriverLogLevels( + java.util.Map values) { + internalGetAdaptedDriverLogLevelsMap( + internalGetMutableDriverLogLevels().getMutableMap()) + .putAll(values); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableDriverLogLevelsValue() { + return internalGetMutableDriverLogLevels().getMutableMap(); + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
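+     * A sketch of the raw-value form, which stores the enum's wire number
+     * directly ("builder" is a hypothetical LoggingConfig.Builder instance):
+     *   builder.putDriverLogLevelsValue("root", LoggingConfig.Level.INFO.getNumber());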
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + public Builder putDriverLogLevelsValue( + java.lang.String key, + int value) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableDriverLogLevels().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * The per-package log levels for the driver. This may include
+     * "root" package name to configure rootLogger.
+     * Examples:
+     *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+     * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + public Builder putAllDriverLogLevelsValue( + java.util.Map values) { + internalGetMutableDriverLogLevels().getMutableMap() + .putAll(values); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.LoggingConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.LoggingConfig) + private static final com.google.cloud.dataproc.v1beta2.LoggingConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.LoggingConfig(); + } + + public static com.google.cloud.dataproc.v1beta2.LoggingConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public LoggingConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LoggingConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.LoggingConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfigOrBuilder.java new file mode 100644 index 000000000000..b3414892ac1f --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfigOrBuilder.java @@ -0,0 +1,121 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface LoggingConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.LoggingConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + int getDriverLogLevelsCount(); + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + boolean containsDriverLogLevels( + java.lang.String key); + /** + * Use {@link #getDriverLogLevelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getDriverLogLevels(); + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + java.util.Map + getDriverLogLevelsMap(); + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level getDriverLogLevelsOrDefault( + java.lang.String key, + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level defaultValue); + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
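+   * Note: per the message implementation earlier in this patch, this accessor
+   * throws java.lang.IllegalArgumentException when the key is absent.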
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfig.Level getDriverLogLevelsOrThrow( + java.lang.String key); + /** + * Use {@link #getDriverLogLevelsValueMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getDriverLogLevelsValue(); + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + java.util.Map + getDriverLogLevelsValueMap(); + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + int getDriverLogLevelsValueOrDefault( + java.lang.String key, + int defaultValue); + /** + *
+   * The per-package log levels for the driver. This may include
+   * "root" package name to configure rootLogger.
+   * Examples:
+   *   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+   * 
+ * + * map<string, .google.cloud.dataproc.v1beta2.LoggingConfig.Level> driver_log_levels = 2; + */ + + int getDriverLogLevelsValueOrThrow( + java.lang.String key); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedCluster.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedCluster.java new file mode 100644 index 000000000000..500e43401be8 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedCluster.java @@ -0,0 +1,1222 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Cluster that is managed by the workflow.
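+ * A construction sketch, assuming only the builder methods generated in
+ * this file (the ClusterConfig value is a placeholder):
+ *   ManagedCluster managed = ManagedCluster.newBuilder()
+ *       .setClusterName("analytics-staging")
+ *       .setConfig(ClusterConfig.getDefaultInstance())
+ *       .build();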
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ManagedCluster} + */ +public final class ManagedCluster extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ManagedCluster) + ManagedClusterOrBuilder { +private static final long serialVersionUID = 0L; + // Use ManagedCluster.newBuilder() to construct. + private ManagedCluster(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ManagedCluster() { + clusterName_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ManagedCluster( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 26: { + com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder subBuilder = null; + if (config_ != null) { + subBuilder = config_.toBuilder(); + } + config_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(config_); + config_ = subBuilder.buildPartial(); + } + + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000004; + } + com.google.protobuf.MapEntry + labels__ = input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + labels_.getMutableMap().put( + labels__.getKey(), labels__.getValue()); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 4: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ManagedCluster.class, 
com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder.class); + } + + private int bitField0_; + public static final int CLUSTER_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object clusterName_; + /** + *
+   * Required. The cluster name prefix. A unique cluster name will be formed by
+   * appending a random suffix.
+   * The name must contain only lower-case letters (a-z), numbers (0-9),
+   * and hyphens (-). Must begin with a letter. Cannot begin or end with
+   * a hyphen. Must consist of between 2 and 35 characters.
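+   * For example, "analytics-staging" is a valid prefix, while "-staging" and
+   * "2clusters" are not (leading hyphen and leading digit, respectively).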
+   * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Required. The cluster name prefix. A unique cluster name will be formed by
+   * appending a random suffix.
+   * The name must contain only lower-case letters (a-z), numbers (0-9),
+   * and hyphens (-). Must begin with a letter. Cannot begin or end with
+   * a hyphen. Must consist of between 2 and 35 characters.
+   * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONFIG_FIELD_NUMBER = 3; + private com.google.cloud.dataproc.v1beta2.ClusterConfig config_; + /** + *
+   * Required. The cluster configuration.
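+   * A reading sketch ("managed" is a hypothetical ManagedCluster instance;
+   * hasConfig() and getConfig() are defined below):
+   *   if (managed.hasConfig()) {
+   *     ClusterConfig cfg = managed.getConfig();
+   *   }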
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public boolean hasConfig() { + return config_ != null; + } + /** + *
+   * Required. The cluster configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig() { + return config_ == null ? com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_; + } + /** + *
+   * Required. The cluster configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder() { + return getConfig(); + } + + public static final int LABELS_FIELD_NUMBER = 4; + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
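+   * For example, a key/value pair such as "env" = "prod" satisfies both
+   * expressions above, while "Env" = "prod" does not (upper-case key).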
+   * 
+ * + * map<string, string> labels = 4; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
+ * + * map<string, string> labels = 4; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
+ * + * map<string, string> labels = 4; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
+ * + * map<string, string> labels = 4; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterName_); + } + if (config_ != null) { + output.writeMessage(3, getConfig()); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetLabels(), + LabelsDefaultEntryHolder.defaultEntry, + 4); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterName_); + } + if (config_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getConfig()); + } + for (java.util.Map.Entry entry + : internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry + labels__ = LabelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, labels__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ManagedCluster)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ManagedCluster other = (com.google.cloud.dataproc.v1beta2.ManagedCluster) obj; + + boolean result = true; + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && (hasConfig() == other.hasConfig()); + if (hasConfig()) { + result = result && getConfig() + .equals(other.getConfig()); + } + result = result && internalGetLabels().equals( + other.internalGetLabels()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + if (hasConfig()) { + hash = (37 * hash) + CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getConfig().hashCode(); + } + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ManagedCluster parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ManagedCluster prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Cluster that is managed by the workflow.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ManagedCluster} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ManagedCluster) + com.google.cloud.dataproc.v1beta2.ManagedClusterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 4: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 4: + return internalGetMutableLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ManagedCluster.class, com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ManagedCluster.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + clusterName_ = ""; + + if (configBuilder_ == null) { + config_ = null; + } else { + config_ = null; + configBuilder_ = null; + } + internalGetMutableLabels().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedCluster getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedCluster build() { + com.google.cloud.dataproc.v1beta2.ManagedCluster result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedCluster buildPartial() { + com.google.cloud.dataproc.v1beta2.ManagedCluster result = new com.google.cloud.dataproc.v1beta2.ManagedCluster(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.clusterName_ = clusterName_; + if (configBuilder_ == null) { + result.config_ = config_; + } else { + result.config_ = configBuilder_.build(); + } + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + 
public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ManagedCluster) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ManagedCluster)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ManagedCluster other) { + if (other == com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance()) return this; + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (other.hasConfig()) { + mergeConfig(other.getConfig()); + } + internalGetMutableLabels().mergeFrom( + other.internalGetLabels()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ManagedCluster parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ManagedCluster) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Required. The cluster name prefix. A unique cluster name will be formed by
+     * appending a random suffix.
+     * The name must contain only lower-case letters (a-z), numbers (0-9),
+     * and hyphens (-). Must begin with a letter. Cannot begin or end with
+     * a hyphen. Must consist of between 2 and 35 characters.
+     * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The cluster name prefix. A unique cluster name will be formed by
+     * appending a random suffix.
+     * The name must contain only lower-case letters (a-z), numbers (0-9),
+     * and hyphens (-). Must begin with a letter. Cannot begin or end with
+     * a hyphen. Must consist of between 2 and 35 characters.
+     * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The cluster name prefix. A unique cluster name will be formed by
+     * appending a random suffix.
+     * The name must contain only lower-case letters (a-z), numbers (0-9),
+     * and hyphens (-). Must begin with a letter. Cannot begin or end with
+     * a hyphen. Must consist of between 2 and 35 characters.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name prefix. A unique cluster name will be formed by
+     * appending a random suffix.
+     * The name must contain only lower-case letters (a-z), numbers (0-9),
+     * and hyphens (-). Must begin with a letter. Cannot begin or end with
+     * a hyphen. Must consist of between 2 and 35 characters.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name prefix. A unique cluster name will be formed by
+     * appending a random suffix.
+     * The name must contain only lower-case letters (a-z), numbers (0-9),
+     * and hyphens (-). Must begin with a letter. Cannot begin or end with
+     * a hyphen. Must consist of between 2 and 35 characters.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.ClusterConfig config_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder> configBuilder_; + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public boolean hasConfig() { + return configBuilder_ != null || config_ != null; + } + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig() { + if (configBuilder_ == null) { + return config_ == null ? com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_; + } else { + return configBuilder_.getMessage(); + } + } + /** + *
+     * Required. The cluster configuration.
+     * 
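+     *
+     * <p>Minimal illustrative sketch (assuming {@code builder} is a
+     * {@code ManagedCluster.Builder}); a real config would typically also
+     * set GCE, software, and instance-group options:
+     * <pre>{@code
+     * builder.setConfig(ClusterConfig.newBuilder().build());
+     * }</pre>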
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder setConfig(com.google.cloud.dataproc.v1beta2.ClusterConfig value) { + if (configBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + config_ = value; + onChanged(); + } else { + configBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder setConfig( + com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder builderForValue) { + if (configBuilder_ == null) { + config_ = builderForValue.build(); + onChanged(); + } else { + configBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder mergeConfig(com.google.cloud.dataproc.v1beta2.ClusterConfig value) { + if (configBuilder_ == null) { + if (config_ != null) { + config_ = + com.google.cloud.dataproc.v1beta2.ClusterConfig.newBuilder(config_).mergeFrom(value).buildPartial(); + } else { + config_ = value; + } + onChanged(); + } else { + configBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public Builder clearConfig() { + if (configBuilder_ == null) { + config_ = null; + onChanged(); + } else { + config_ = null; + configBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder getConfigBuilder() { + + onChanged(); + return getConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder() { + if (configBuilder_ != null) { + return configBuilder_.getMessageOrBuilder(); + } else { + return config_ == null ? + com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_; + } + } + /** + *
+     * Required. The cluster configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder> + getConfigFieldBuilder() { + if (configBuilder_ == null) { + configBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder>( + getConfig(), + getParentForChildren(), + isClean()); + config_ = null; + } + return configBuilder_; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + private com.google.protobuf.MapField + internalGetMutableLabels() { + onChanged();; + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+     * Optional. The labels to associate with this cluster.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given cluster.
+     * 
+ * + * map<string, string> labels = 4; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+     * Optional. The labels to associate with this cluster.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given cluster.
+     * 
+ * + * map<string, string> labels = 4; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+     * Optional. The labels to associate with this cluster.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given cluster.
+     * 
+ * + * map<string, string> labels = 4; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. The labels to associate with this cluster.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given cluster.
+     * 
+ * + * map<string, string> labels = 4; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + internalGetMutableLabels().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. The labels to associate with this cluster.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given cluster.
+     * 
+ * + * map<string, string> labels = 4; + */ + + public Builder removeLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableLabels() { + return internalGetMutableLabels().getMutableMap(); + } + /** + *
+     * Optional. The labels to associate with this cluster.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given cluster.
+     * 
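+     *
+     * <p>Illustrative sketch; the key and value are made-up examples that
+     * match the patterns above (assuming {@code builder} is a
+     * {@code ManagedCluster.Builder}):
+     * <pre>{@code
+     * builder.putLabels("env", "staging");
+     * }</pre>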
+ * + * map<string, string> labels = 4; + */ + public Builder putLabels( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. The labels to associate with this cluster.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given cluster.
+     * 
+ * + * map<string, string> labels = 4; + */ + + public Builder putAllLabels( + java.util.Map values) { + internalGetMutableLabels().getMutableMap() + .putAll(values); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ManagedCluster) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ManagedCluster) + private static final com.google.cloud.dataproc.v1beta2.ManagedCluster DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ManagedCluster(); + } + + public static com.google.cloud.dataproc.v1beta2.ManagedCluster getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ManagedCluster parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ManagedCluster(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedCluster getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedClusterOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedClusterOrBuilder.java new file mode 100644 index 000000000000..c10ac5904513 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedClusterOrBuilder.java @@ -0,0 +1,144 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ManagedClusterOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ManagedCluster) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The cluster name prefix. A unique cluster name will be formed by
+   * appending a random suffix.
+   * The name must contain only lower-case letters (a-z), numbers (0-9),
+   * and hyphens (-). Must begin with a letter. Cannot begin or end with
+   * a hyphen. Must consist of between 2 and 35 characters.
+   * 
+ * + * string cluster_name = 2; + */ + java.lang.String getClusterName(); + /** + *
+   * Required. The cluster name prefix. A unique cluster name will be formed by
+   * appending a random suffix.
+   * The name must contain only lower-case letters (a-z), numbers (0-9),
+   * and hyphens (-). Must begin with a letter. Cannot begin or end with
+   * a hyphen. Must consist of between 2 and 35 characters.
+   * 
+ * + * string cluster_name = 2; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Required. The cluster configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + boolean hasConfig(); + /** + *
+   * Required. The cluster configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig(); + /** + *
+   * Required. The cluster configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterConfig config = 3; + */ + com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder(); + + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
+ * + * map<string, string> labels = 4; + */ + int getLabelsCount(); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
+ * + * map<string, string> labels = 4; + */ + boolean containsLabels( + java.lang.String key); + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getLabels(); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
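+   *
+   * <p>Illustrative read-side sketch (assuming {@code cluster} is a built
+   * {@code ManagedCluster}):
+   * <pre>{@code
+   * for (java.util.Map.Entry<String, String> e : cluster.getLabelsMap().entrySet()) {
+   *   System.out.println(e.getKey() + "=" + e.getValue());
+   * }
+   * }</pre>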
+ * + * map<string, string> labels = 4; + */ + java.util.Map + getLabelsMap(); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
+ * + * map<string, string> labels = 4; + */ + + java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. The labels to associate with this cluster.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given cluster.
+   * 
+ * + * map<string, string> labels = 4; + */ + + java.lang.String getLabelsOrThrow( + java.lang.String key); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfig.java new file mode 100644 index 000000000000..7e37d4eed4db --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfig.java @@ -0,0 +1,739 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Specifies the resources used to actively manage an instance group.
+ * 
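+ *
+ * <p>Both fields are output only and populated by the service; an
+ * illustrative read-side sketch (assuming {@code managedGroupConfig} came
+ * from a returned cluster's instance group config):
+ * <pre>{@code
+ * String template = managedGroupConfig.getInstanceTemplateName();
+ * String manager = managedGroupConfig.getInstanceGroupManagerName();
+ * }</pre>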
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ManagedGroupConfig} + */ +public final class ManagedGroupConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.ManagedGroupConfig) + ManagedGroupConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use ManagedGroupConfig.newBuilder() to construct. + private ManagedGroupConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ManagedGroupConfig() { + instanceTemplateName_ = ""; + instanceGroupManagerName_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ManagedGroupConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + instanceTemplateName_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + instanceGroupManagerName_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.class, com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder.class); + } + + public static final int INSTANCE_TEMPLATE_NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object instanceTemplateName_; + /** + *
+   * Output only. The name of the Instance Template used for the Managed
+   * Instance Group.
+   * 
+ * + * string instance_template_name = 1; + */ + public java.lang.String getInstanceTemplateName() { + java.lang.Object ref = instanceTemplateName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceTemplateName_ = s; + return s; + } + } + /** + *
+   * Output only. The name of the Instance Template used for the Managed
+   * Instance Group.
+   * 
+ * + * string instance_template_name = 1; + */ + public com.google.protobuf.ByteString + getInstanceTemplateNameBytes() { + java.lang.Object ref = instanceTemplateName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceTemplateName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_GROUP_MANAGER_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object instanceGroupManagerName_; + /** + *
+   * Output only. The name of the Instance Group Manager for this group.
+   * 
+ * + * string instance_group_manager_name = 2; + */ + public java.lang.String getInstanceGroupManagerName() { + java.lang.Object ref = instanceGroupManagerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceGroupManagerName_ = s; + return s; + } + } + /** + *
+   * Output only. The name of the Instance Group Manager for this group.
+   * 
+ * + * string instance_group_manager_name = 2; + */ + public com.google.protobuf.ByteString + getInstanceGroupManagerNameBytes() { + java.lang.Object ref = instanceGroupManagerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceGroupManagerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getInstanceTemplateNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, instanceTemplateName_); + } + if (!getInstanceGroupManagerNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, instanceGroupManagerName_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getInstanceTemplateNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, instanceTemplateName_); + } + if (!getInstanceGroupManagerNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, instanceGroupManagerName_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.ManagedGroupConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig other = (com.google.cloud.dataproc.v1beta2.ManagedGroupConfig) obj; + + boolean result = true; + result = result && getInstanceTemplateName() + .equals(other.getInstanceTemplateName()); + result = result && getInstanceGroupManagerName() + .equals(other.getInstanceGroupManagerName()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_TEMPLATE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getInstanceTemplateName().hashCode(); + hash = (37 * hash) + INSTANCE_GROUP_MANAGER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getInstanceGroupManagerName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.ManagedGroupConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Specifies the resources used to actively manage an instance group.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.ManagedGroupConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.ManagedGroupConfig) + com.google.cloud.dataproc.v1beta2.ManagedGroupConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.class, com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + instanceTemplateName_ = ""; + + instanceGroupManagerName_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_ManagedGroupConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfig build() { + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig result = new com.google.cloud.dataproc.v1beta2.ManagedGroupConfig(this); + result.instanceTemplateName_ = instanceTemplateName_; + result.instanceGroupManagerName_ = instanceGroupManagerName_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + 
public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.ManagedGroupConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.ManagedGroupConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.ManagedGroupConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.ManagedGroupConfig.getDefaultInstance()) return this; + if (!other.getInstanceTemplateName().isEmpty()) { + instanceTemplateName_ = other.instanceTemplateName_; + onChanged(); + } + if (!other.getInstanceGroupManagerName().isEmpty()) { + instanceGroupManagerName_ = other.instanceGroupManagerName_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.ManagedGroupConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.ManagedGroupConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object instanceTemplateName_ = ""; + /** + *
+     * Output only. The name of the Instance Template used for the Managed
+     * Instance Group.
+     * 
+ * + * string instance_template_name = 1; + */ + public java.lang.String getInstanceTemplateName() { + java.lang.Object ref = instanceTemplateName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceTemplateName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The name of the Instance Template used for the Managed
+     * Instance Group.
+     * 
+ * + * string instance_template_name = 1; + */ + public com.google.protobuf.ByteString + getInstanceTemplateNameBytes() { + java.lang.Object ref = instanceTemplateName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceTemplateName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The name of the Instance Template used for the Managed
+     * Instance Group.
+     * 
+ * + * string instance_template_name = 1; + */ + public Builder setInstanceTemplateName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + instanceTemplateName_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the Instance Template used for the Managed
+     * Instance Group.
+     * 
+ * + * string instance_template_name = 1; + */ + public Builder clearInstanceTemplateName() { + + instanceTemplateName_ = getDefaultInstance().getInstanceTemplateName(); + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the Instance Template used for the Managed
+     * Instance Group.
+     * 
+ * + * string instance_template_name = 1; + */ + public Builder setInstanceTemplateNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + instanceTemplateName_ = value; + onChanged(); + return this; + } + + private java.lang.Object instanceGroupManagerName_ = ""; + /** + *
+     * Output only. The name of the Instance Group Manager for this group.
+     * 
+ * + * string instance_group_manager_name = 2; + */ + public java.lang.String getInstanceGroupManagerName() { + java.lang.Object ref = instanceGroupManagerName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceGroupManagerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The name of the Instance Group Manager for this group.
+     * 
+ * + * string instance_group_manager_name = 2; + */ + public com.google.protobuf.ByteString + getInstanceGroupManagerNameBytes() { + java.lang.Object ref = instanceGroupManagerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + instanceGroupManagerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The name of the Instance Group Manager for this group.
+     * 
+ * + * string instance_group_manager_name = 2; + */ + public Builder setInstanceGroupManagerName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + instanceGroupManagerName_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the Instance Group Manager for this group.
+     * 
+ * + * string instance_group_manager_name = 2; + */ + public Builder clearInstanceGroupManagerName() { + + instanceGroupManagerName_ = getDefaultInstance().getInstanceGroupManagerName(); + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the Instance Group Manager for this group.
+     * 
+ * + * string instance_group_manager_name = 2; + */ + public Builder setInstanceGroupManagerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + instanceGroupManagerName_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.ManagedGroupConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ManagedGroupConfig) + private static final com.google.cloud.dataproc.v1beta2.ManagedGroupConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.ManagedGroupConfig(); + } + + public static com.google.cloud.dataproc.v1beta2.ManagedGroupConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ManagedGroupConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ManagedGroupConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.ManagedGroupConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfigOrBuilder.java new file mode 100644 index 000000000000..e3a00f5aae5f --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfigOrBuilder.java @@ -0,0 +1,47 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface ManagedGroupConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.ManagedGroupConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The name of the Instance Template used for the Managed
+   * Instance Group.
+   * 
+ * + * string instance_template_name = 1; + */ + java.lang.String getInstanceTemplateName(); + /** + *
+   * Output only. The name of the Instance Template used for the Managed
+   * Instance Group.
+   * 
+ * + * string instance_template_name = 1; + */ + com.google.protobuf.ByteString + getInstanceTemplateNameBytes(); + + /** + *
+   * Output only. The name of the Instance Group Manager for this group.
+   * 
+ * + * string instance_group_manager_name = 2; + */ + java.lang.String getInstanceGroupManagerName(); + /** + *
+   * Output only. The name of the Instance Group Manager for this group.
+   * 
+ * + * string instance_group_manager_name = 2; + */ + com.google.protobuf.ByteString + getInstanceGroupManagerNameBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationAction.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationAction.java new file mode 100644 index 000000000000..25fbe29279f4 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationAction.java @@ -0,0 +1,844 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Specifies an executable to run on a fully configured node and a
+ * timeout period for executable completion.
+ * 
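+ *
+ * <p>Illustrative sketch; the Cloud Storage path is a made-up example, and
+ * the 600-second timeout mirrors the documented 10-minute default:
+ * <pre>{@code
+ * NodeInitializationAction action = NodeInitializationAction.newBuilder()
+ *     .setExecutableFile("gs://my-bucket/scripts/init.sh")
+ *     .setExecutionTimeout(
+ *         com.google.protobuf.Duration.newBuilder().setSeconds(600).build())
+ *     .build();
+ * }</pre>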
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.NodeInitializationAction} + */ +public final class NodeInitializationAction extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.NodeInitializationAction) + NodeInitializationActionOrBuilder { +private static final long serialVersionUID = 0L; + // Use NodeInitializationAction.newBuilder() to construct. + private NodeInitializationAction(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private NodeInitializationAction() { + executableFile_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private NodeInitializationAction( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + executableFile_ = s; + break; + } + case 18: { + com.google.protobuf.Duration.Builder subBuilder = null; + if (executionTimeout_ != null) { + subBuilder = executionTimeout_.toBuilder(); + } + executionTimeout_ = input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(executionTimeout_); + executionTimeout_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.NodeInitializationAction.class, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder.class); + } + + public static final int EXECUTABLE_FILE_FIELD_NUMBER = 1; + private volatile java.lang.Object executableFile_; + /** + *
+   * Required. Cloud Storage URI of the executable file.
+   * 
+ * + * string executable_file = 1; + */ + public java.lang.String getExecutableFile() { + java.lang.Object ref = executableFile_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + executableFile_ = s; + return s; + } + } + /** + *
+   * Required. Cloud Storage URI of the executable file.
+   * 
+ * + * string executable_file = 1; + */ + public com.google.protobuf.ByteString + getExecutableFileBytes() { + java.lang.Object ref = executableFile_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + executableFile_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXECUTION_TIMEOUT_FIELD_NUMBER = 2; + private com.google.protobuf.Duration executionTimeout_; + /** + *
+   * Optional. Amount of time the executable has to complete. Default is
+   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * name of the executable that caused the error and the exceeded timeout
+   * period) if the executable has not completed by the end of the timeout period.
+   * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public boolean hasExecutionTimeout() { + return executionTimeout_ != null; + } + /** + *
+   * Optional. Amount of time the executable has to complete. Default is
+   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * name of the executable that caused the error and the exceeded timeout
+   * period) if the executable has not completed by the end of the timeout period.
+   * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public com.google.protobuf.Duration getExecutionTimeout() { + return executionTimeout_ == null ? com.google.protobuf.Duration.getDefaultInstance() : executionTimeout_; + } + /** + *
+   * Optional. Amount of time the executable has to complete. Default is
+   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * name of the executable that caused the error and the exceeded timeout
+   * period) if the executable has not completed by the end of the timeout period.
+   * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public com.google.protobuf.DurationOrBuilder getExecutionTimeoutOrBuilder() { + return getExecutionTimeout(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getExecutableFileBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, executableFile_); + } + if (executionTimeout_ != null) { + output.writeMessage(2, getExecutionTimeout()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getExecutableFileBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, executableFile_); + } + if (executionTimeout_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getExecutionTimeout()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.NodeInitializationAction)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.NodeInitializationAction other = (com.google.cloud.dataproc.v1beta2.NodeInitializationAction) obj; + + boolean result = true; + result = result && getExecutableFile() + .equals(other.getExecutableFile()); + result = result && (hasExecutionTimeout() == other.hasExecutionTimeout()); + if (hasExecutionTimeout()) { + result = result && getExecutionTimeout() + .equals(other.getExecutionTimeout()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + EXECUTABLE_FILE_FIELD_NUMBER; + hash = (53 * hash) + getExecutableFile().hashCode(); + if (hasExecutionTimeout()) { + hash = (37 * hash) + EXECUTION_TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + getExecutionTimeout().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.NodeInitializationAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Specifies an executable to run on a fully configured node and a
+   * timeout period for executable completion.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.NodeInitializationAction} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.NodeInitializationAction) + com.google.cloud.dataproc.v1beta2.NodeInitializationActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.NodeInitializationAction.class, com.google.cloud.dataproc.v1beta2.NodeInitializationAction.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.NodeInitializationAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + executableFile_ = ""; + + if (executionTimeoutBuilder_ == null) { + executionTimeout_ = null; + } else { + executionTimeout_ = null; + executionTimeoutBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_NodeInitializationAction_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.NodeInitializationAction getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.NodeInitializationAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.NodeInitializationAction build() { + com.google.cloud.dataproc.v1beta2.NodeInitializationAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.NodeInitializationAction buildPartial() { + com.google.cloud.dataproc.v1beta2.NodeInitializationAction result = new com.google.cloud.dataproc.v1beta2.NodeInitializationAction(this); + result.executableFile_ = executableFile_; + if (executionTimeoutBuilder_ == null) { + result.executionTimeout_ = executionTimeout_; + } else { + result.executionTimeout_ = executionTimeoutBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + 
return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.NodeInitializationAction) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.NodeInitializationAction)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.NodeInitializationAction other) { + if (other == com.google.cloud.dataproc.v1beta2.NodeInitializationAction.getDefaultInstance()) return this; + if (!other.getExecutableFile().isEmpty()) { + executableFile_ = other.executableFile_; + onChanged(); + } + if (other.hasExecutionTimeout()) { + mergeExecutionTimeout(other.getExecutionTimeout()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.NodeInitializationAction parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.NodeInitializationAction) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object executableFile_ = ""; + /** + *
+     * Required. Cloud Storage URI of executable file.
+     * 
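+     *
+     * A minimal construction sketch (illustrative only; the gs:// path is a
+     * hypothetical placeholder):
+     *
+     *   NodeInitializationAction action = NodeInitializationAction.newBuilder()
+     *       .setExecutableFile("gs://example-bucket/startup-script.sh")
+     *       .build();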
+ * + * string executable_file = 1; + */ + public java.lang.String getExecutableFile() { + java.lang.Object ref = executableFile_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + executableFile_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. Cloud Storage URI of executable file.
+     * 
+ * + * string executable_file = 1; + */ + public com.google.protobuf.ByteString + getExecutableFileBytes() { + java.lang.Object ref = executableFile_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + executableFile_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. Cloud Storage URI of executable file.
+     * 
+ * + * string executable_file = 1; + */ + public Builder setExecutableFile( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + executableFile_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. Cloud Storage URI of executable file.
+     * 
+ * + * string executable_file = 1; + */ + public Builder clearExecutableFile() { + + executableFile_ = getDefaultInstance().getExecutableFile(); + onChanged(); + return this; + } + /** + *
+     * Required. Cloud Storage URI of executable file.
+     * 
+ * + * string executable_file = 1; + */ + public Builder setExecutableFileBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + executableFile_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Duration executionTimeout_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> executionTimeoutBuilder_; + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public boolean hasExecutionTimeout() { + return executionTimeoutBuilder_ != null || executionTimeout_ != null; + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public com.google.protobuf.Duration getExecutionTimeout() { + if (executionTimeoutBuilder_ == null) { + return executionTimeout_ == null ? com.google.protobuf.Duration.getDefaultInstance() : executionTimeout_; + } else { + return executionTimeoutBuilder_.getMessage(); + } + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
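+     *
+     * A hedged sketch of overriding the default timeout (assumes the standard
+     * com.google.protobuf.Duration builder; 600 seconds is the 10-minute default):
+     *
+     *   builder.setExecutionTimeout(
+     *       com.google.protobuf.Duration.newBuilder().setSeconds(600).build());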
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public Builder setExecutionTimeout(com.google.protobuf.Duration value) { + if (executionTimeoutBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + executionTimeout_ = value; + onChanged(); + } else { + executionTimeoutBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public Builder setExecutionTimeout( + com.google.protobuf.Duration.Builder builderForValue) { + if (executionTimeoutBuilder_ == null) { + executionTimeout_ = builderForValue.build(); + onChanged(); + } else { + executionTimeoutBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public Builder mergeExecutionTimeout(com.google.protobuf.Duration value) { + if (executionTimeoutBuilder_ == null) { + if (executionTimeout_ != null) { + executionTimeout_ = + com.google.protobuf.Duration.newBuilder(executionTimeout_).mergeFrom(value).buildPartial(); + } else { + executionTimeout_ = value; + } + onChanged(); + } else { + executionTimeoutBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public Builder clearExecutionTimeout() { + if (executionTimeoutBuilder_ == null) { + executionTimeout_ = null; + onChanged(); + } else { + executionTimeout_ = null; + executionTimeoutBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public com.google.protobuf.Duration.Builder getExecutionTimeoutBuilder() { + + onChanged(); + return getExecutionTimeoutFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + public com.google.protobuf.DurationOrBuilder getExecutionTimeoutOrBuilder() { + if (executionTimeoutBuilder_ != null) { + return executionTimeoutBuilder_.getMessageOrBuilder(); + } else { + return executionTimeout_ == null ? + com.google.protobuf.Duration.getDefaultInstance() : executionTimeout_; + } + } + /** + *
+     * Optional. Amount of time the executable has to complete. The default is
+     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * name of the executable that caused the error and the exceeded timeout
+     * period) if the executable has not completed by the end of the timeout period.
+     * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> + getExecutionTimeoutFieldBuilder() { + if (executionTimeoutBuilder_ == null) { + executionTimeoutBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder>( + getExecutionTimeout(), + getParentForChildren(), + isClean()); + executionTimeout_ = null; + } + return executionTimeoutBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.NodeInitializationAction) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.NodeInitializationAction) + private static final com.google.cloud.dataproc.v1beta2.NodeInitializationAction DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.NodeInitializationAction(); + } + + public static com.google.cloud.dataproc.v1beta2.NodeInitializationAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public NodeInitializationAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NodeInitializationAction(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.NodeInitializationAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationActionOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationActionOrBuilder.java new file mode 100644 index 000000000000..76ca5a58a926 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationActionOrBuilder.java @@ -0,0 +1,61 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface NodeInitializationActionOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.NodeInitializationAction) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. Cloud Storage URI of executable file.
+   * 
+ * + * string executable_file = 1; + */ + java.lang.String getExecutableFile(); + /** + *
+   * Required. Cloud Storage URI of executable file.
+   * 
+ * + * string executable_file = 1; + */ + com.google.protobuf.ByteString + getExecutableFileBytes(); + + /** + *
+   * Optional. Amount of time the executable has to complete. The default is
+   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * name of the executable that caused the error and the exceeded timeout
+   * period) if the executable has not completed by the end of the timeout period.
+   * 
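+   *
+   * A small read-side sketch (the variable name is illustrative):
+   *
+   *   if (action.hasExecutionTimeout()) {
+   *     long seconds = action.getExecutionTimeout().getSeconds();
+   *   }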
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + boolean hasExecutionTimeout(); + /** + *
+   * Optional. Amount of time the executable has to complete. The default is
+   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * name of the executable that caused the error and the exceeded timeout
+   * period) if the executable has not completed by the end of the timeout period.
+   * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + com.google.protobuf.Duration getExecutionTimeout(); + /** + *
+   * Optional. Amount of time the executable has to complete. The default is
+   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * name of the executable that caused the error and the exceeded timeout
+   * period) if the executable has not completed by the end of the timeout period.
+   * 
+ * + * .google.protobuf.Duration execution_timeout = 2; + */ + com.google.protobuf.DurationOrBuilder getExecutionTimeoutOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OperationsProto.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OperationsProto.java new file mode 100644 index 000000000000..e8c37aa6b171 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OperationsProto.java @@ -0,0 +1,103 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/operations.proto + +package com.google.cloud.dataproc.v1beta2; + +public final class OperationsProto { + private OperationsProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_LabelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_LabelsEntry_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n.google/cloud/dataproc/v1beta2/operatio" + + "ns.proto\022\035google.cloud.dataproc.v1beta2\032" + + "\034google/api/annotations.proto\032\037google/pr" + + "otobuf/timestamp.proto\"\372\001\n\026ClusterOperat" + + "ionStatus\022J\n\005state\030\001 \001(\0162;.google.cloud." + + "dataproc.v1beta2.ClusterOperationStatus." 
+ + "State\022\023\n\013inner_state\030\002 \001(\t\022\017\n\007details\030\003 " + + "\001(\t\0224\n\020state_start_time\030\004 \001(\0132\032.google.p" + + "rotobuf.Timestamp\"8\n\005State\022\013\n\007UNKNOWN\020\000\022" + + "\013\n\007PENDING\020\001\022\013\n\007RUNNING\020\002\022\010\n\004DONE\020\003\"\237\003\n\030" + + "ClusterOperationMetadata\022\024\n\014cluster_name" + + "\030\007 \001(\t\022\024\n\014cluster_uuid\030\010 \001(\t\022E\n\006status\030\t" + + " \001(\01325.google.cloud.dataproc.v1beta2.Clu" + + "sterOperationStatus\022M\n\016status_history\030\n " + + "\003(\01325.google.cloud.dataproc.v1beta2.Clus" + + "terOperationStatus\022\026\n\016operation_type\030\013 \001" + + "(\t\022\023\n\013description\030\014 \001(\t\022S\n\006labels\030\r \003(\0132" + + "C.google.cloud.dataproc.v1beta2.ClusterO" + + "perationMetadata.LabelsEntry\022\020\n\010warnings" + + "\030\016 \003(\t\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005va" + + "lue\030\002 \001(\t:\0028\001B}\n!com.google.cloud.datapr" + + "oc.v1beta2B\017OperationsProtoP\001ZEgoogle.go" + + "lang.org/genproto/googleapis/cloud/datap" + + "roc/v1beta2;dataprocb\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }, assigner); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationStatus_descriptor, + new java.lang.String[] { "State", "InnerState", "Details", "StateStartTime", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_descriptor, + new java.lang.String[] { "ClusterName", "ClusterUuid", "Status", "StatusHistory", "OperationType", "Description", "Labels", "Warnings", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_LabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_LabelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterOperationMetadata_LabelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git 
a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java new file mode 100644 index 000000000000..e8f4c4e72b07 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java @@ -0,0 +1,3065 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A job executed by the workflow.
+ * 
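+ *
+ * A minimal construction sketch (illustrative; the step id is a placeholder and
+ * HadoopJob.getDefaultInstance() merely stands in for a real job configuration):
+ *
+ *   OrderedJob job = OrderedJob.newBuilder()
+ *       .setStepId("step-one")
+ *       .setHadoopJob(HadoopJob.getDefaultInstance())
+ *       .build();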
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.OrderedJob} + */ +public final class OrderedJob extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.OrderedJob) + OrderedJobOrBuilder { +private static final long serialVersionUID = 0L; + // Use OrderedJob.newBuilder() to construct. + private OrderedJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private OrderedJob() { + stepId_ = ""; + prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OrderedJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + stepId_ = s; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.HadoopJob.Builder subBuilder = null; + if (jobTypeCase_ == 2) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_).toBuilder(); + } + jobType_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.HadoopJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_); + jobType_ = subBuilder.buildPartial(); + } + jobTypeCase_ = 2; + break; + } + case 26: { + com.google.cloud.dataproc.v1beta2.SparkJob.Builder subBuilder = null; + if (jobTypeCase_ == 3) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.SparkJob) jobType_).toBuilder(); + } + jobType_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.SparkJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.SparkJob) jobType_); + jobType_ = subBuilder.buildPartial(); + } + jobTypeCase_ = 3; + break; + } + case 34: { + com.google.cloud.dataproc.v1beta2.PySparkJob.Builder subBuilder = null; + if (jobTypeCase_ == 4) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_).toBuilder(); + } + jobType_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.PySparkJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_); + jobType_ = subBuilder.buildPartial(); + } + jobTypeCase_ = 4; + break; + } + case 42: { + com.google.cloud.dataproc.v1beta2.HiveJob.Builder subBuilder = null; + if (jobTypeCase_ == 5) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.HiveJob) jobType_).toBuilder(); + } + jobType_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.HiveJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.HiveJob) jobType_); + jobType_ = subBuilder.buildPartial(); + } + jobTypeCase_ = 5; + break; + } + case 50: { + com.google.cloud.dataproc.v1beta2.PigJob.Builder subBuilder = null; + if (jobTypeCase_ == 6) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.PigJob) jobType_).toBuilder(); + } + 
jobType_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.PigJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.PigJob) jobType_); + jobType_ = subBuilder.buildPartial(); + } + jobTypeCase_ = 6; + break; + } + case 58: { + com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder subBuilder = null; + if (jobTypeCase_ == 7) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_).toBuilder(); + } + jobType_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.SparkSqlJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_); + jobType_ = subBuilder.buildPartial(); + } + jobTypeCase_ = 7; + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000080; + } + com.google.protobuf.MapEntry + labels__ = input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + labels_.getMutableMap().put( + labels__.getKey(), labels__.getValue()); + break; + } + case 74: { + com.google.cloud.dataproc.v1beta2.JobScheduling.Builder subBuilder = null; + if (scheduling_ != null) { + subBuilder = scheduling_.toBuilder(); + } + scheduling_ = input.readMessage(com.google.cloud.dataproc.v1beta2.JobScheduling.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(scheduling_); + scheduling_ = subBuilder.buildPartial(); + } + + break; + } + case 82: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + prerequisiteStepIds_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000200; + } + prerequisiteStepIds_.add(s); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + prerequisiteStepIds_ = prerequisiteStepIds_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_OrderedJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 8: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_OrderedJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.OrderedJob.class, com.google.cloud.dataproc.v1beta2.OrderedJob.Builder.class); + } + + private int bitField0_; + private int jobTypeCase_ = 0; + private java.lang.Object jobType_; + public 
enum JobTypeCase + implements com.google.protobuf.Internal.EnumLite { + HADOOP_JOB(2), + SPARK_JOB(3), + PYSPARK_JOB(4), + HIVE_JOB(5), + PIG_JOB(6), + SPARK_SQL_JOB(7), + JOBTYPE_NOT_SET(0); + private final int value; + private JobTypeCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static JobTypeCase valueOf(int value) { + return forNumber(value); + } + + public static JobTypeCase forNumber(int value) { + switch (value) { + case 2: return HADOOP_JOB; + case 3: return SPARK_JOB; + case 4: return PYSPARK_JOB; + case 5: return HIVE_JOB; + case 6: return PIG_JOB; + case 7: return SPARK_SQL_JOB; + case 0: return JOBTYPE_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public JobTypeCase + getJobTypeCase() { + return JobTypeCase.forNumber( + jobTypeCase_); + } + + public static final int STEP_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object stepId_; + /** + *
+   * Required. The step id. The id must be unique among all jobs
+   * within the template.
+   * The step id is used as a prefix for the job id, as the job
+   * `goog-dataproc-workflow-step-id` label, and in the
+   * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other
+   * steps.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). It cannot begin or end with an underscore
+   * or hyphen, and must consist of between 3 and 50 characters.
+   * 
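+   *
+   * For example, {@code "step-one"} and {@code "ingest_raw_data"} satisfy these
+   * constraints, while {@code "-step"} (leading hyphen) and {@code "ab"} (too
+   * short) do not.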
+ * + * string step_id = 1; + */ + public java.lang.String getStepId() { + java.lang.Object ref = stepId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stepId_ = s; + return s; + } + } + /** + *
+   * Required. The step id. The id must be unique among all jobs
+   * within the template.
+   * The step id is used as a prefix for the job id, as the job
+   * `goog-dataproc-workflow-step-id` label, and in the
+   * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other
+   * steps.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). It cannot begin or end with an underscore
+   * or hyphen, and must consist of between 3 and 50 characters.
+   * 
+ * + * string step_id = 1; + */ + public com.google.protobuf.ByteString + getStepIdBytes() { + java.lang.Object ref = stepId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + stepId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int HADOOP_JOB_FIELD_NUMBER = 2; + /** + *
+   * Job is a Hadoop job.
+   * 
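+   *
+   * Exactly one job type is set per OrderedJob. A hedged dispatch sketch using
+   * the generated oneof accessor (runHadoop and runSpark are hypothetical
+   * helpers):
+   *
+   *   switch (job.getJobTypeCase()) {
+   *     case HADOOP_JOB:
+   *       runHadoop(job.getHadoopJob());
+   *       break;
+   *     case SPARK_JOB:
+   *       runSpark(job.getSparkJob());
+   *       break;
+   *     default:
+   *       break;
+   *   }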
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public boolean hasHadoopJob() { + return jobTypeCase_ == 2; + } + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJob getHadoopJob() { + if (jobTypeCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder getHadoopJobOrBuilder() { + if (jobTypeCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + + public static final int SPARK_JOB_FIELD_NUMBER = 3; + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public boolean hasSparkJob() { + return jobTypeCase_ == 3; + } + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.SparkJob getSparkJob() { + if (jobTypeCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder getSparkJobOrBuilder() { + if (jobTypeCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + + public static final int PYSPARK_JOB_FIELD_NUMBER = 4; + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public boolean hasPysparkJob() { + return jobTypeCase_ == 4; + } + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJob getPysparkJob() { + if (jobTypeCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder getPysparkJobOrBuilder() { + if (jobTypeCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + + public static final int HIVE_JOB_FIELD_NUMBER = 5; + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public boolean hasHiveJob() { + return jobTypeCase_ == 5; + } + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.HiveJob getHiveJob() { + if (jobTypeCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder getHiveJobOrBuilder() { + if (jobTypeCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + + public static final int PIG_JOB_FIELD_NUMBER = 6; + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public boolean hasPigJob() { + return jobTypeCase_ == 6; + } + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.PigJob getPigJob() { + if (jobTypeCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.PigJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.PigJobOrBuilder getPigJobOrBuilder() { + if (jobTypeCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.PigJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + + public static final int SPARK_SQL_JOB_FIELD_NUMBER = 7; + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public boolean hasSparkSqlJob() { + return jobTypeCase_ == 7; + } + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJob getSparkSqlJob() { + if (jobTypeCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { + if (jobTypeCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + + public static final int LABELS_FIELD_NUMBER = 8; + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_OrderedJob_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
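+   *
+   * A hedged builder sketch (assumes the putLabels mutator that protoc emits
+   * for map fields; the key and value are placeholders):
+   *
+   *   builder.putLabels("env", "staging");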
+ * + * map<string, string> labels = 8; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
+ * + * map<string, string> labels = 8; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
+ * + * map<string, string> labels = 8; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
+ * + * map<string, string> labels = 8; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int SCHEDULING_FIELD_NUMBER = 9; + private com.google.cloud.dataproc.v1beta2.JobScheduling scheduling_; + /** + *
+   * Optional. Job scheduling configuration.
+   * 
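+   *
+   * A hedged sketch (assumes JobScheduling exposes a setMaxFailuresPerHour
+   * setter for its max_failures_per_hour field):
+   *
+   *   builder.setScheduling(
+   *       JobScheduling.newBuilder().setMaxFailuresPerHour(1).build());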
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public boolean hasScheduling() { + return scheduling_ != null; + } + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public com.google.cloud.dataproc.v1beta2.JobScheduling getScheduling() { + return scheduling_ == null ? com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance() : scheduling_; + } + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder getSchedulingOrBuilder() { + return getScheduling(); + } + + public static final int PREREQUISITE_STEP_IDS_FIELD_NUMBER = 10; + private com.google.protobuf.LazyStringList prerequisiteStepIds_; + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
+   * 
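+   *
+   * A hedged sketch (assumes the addPrerequisiteStepIds mutator that protoc
+   * emits for repeated string fields; the ids are placeholders):
+   *
+   *   builder.addPrerequisiteStepIds("step-one")
+   *          .addPrerequisiteStepIds("step-two");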
+ * + * repeated string prerequisite_step_ids = 10; + */ + public com.google.protobuf.ProtocolStringList + getPrerequisiteStepIdsList() { + return prerequisiteStepIds_; + } + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
+   * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public int getPrerequisiteStepIdsCount() { + return prerequisiteStepIds_.size(); + } + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
+   * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public java.lang.String getPrerequisiteStepIds(int index) { + return prerequisiteStepIds_.get(index); + } + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
+   * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public com.google.protobuf.ByteString + getPrerequisiteStepIdsBytes(int index) { + return prerequisiteStepIds_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getStepIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, stepId_); + } + if (jobTypeCase_ == 2) { + output.writeMessage(2, (com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_); + } + if (jobTypeCase_ == 3) { + output.writeMessage(3, (com.google.cloud.dataproc.v1beta2.SparkJob) jobType_); + } + if (jobTypeCase_ == 4) { + output.writeMessage(4, (com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_); + } + if (jobTypeCase_ == 5) { + output.writeMessage(5, (com.google.cloud.dataproc.v1beta2.HiveJob) jobType_); + } + if (jobTypeCase_ == 6) { + output.writeMessage(6, (com.google.cloud.dataproc.v1beta2.PigJob) jobType_); + } + if (jobTypeCase_ == 7) { + output.writeMessage(7, (com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetLabels(), + LabelsDefaultEntryHolder.defaultEntry, + 8); + if (scheduling_ != null) { + output.writeMessage(9, getScheduling()); + } + for (int i = 0; i < prerequisiteStepIds_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 10, prerequisiteStepIds_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getStepIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, stepId_); + } + if (jobTypeCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, (com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_); + } + if (jobTypeCase_ == 3) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, (com.google.cloud.dataproc.v1beta2.SparkJob) jobType_); + } + if (jobTypeCase_ == 4) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, (com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_); + } + if (jobTypeCase_ == 5) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, (com.google.cloud.dataproc.v1beta2.HiveJob) jobType_); + } + if (jobTypeCase_ == 6) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, (com.google.cloud.dataproc.v1beta2.PigJob) jobType_); + } + if (jobTypeCase_ == 7) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, (com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_); + } + for (java.util.Map.Entry entry + : internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry + labels__ = LabelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, labels__); + } + if (scheduling_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, getScheduling()); + } + { + int dataSize = 0; + for (int i = 0; i < 
prerequisiteStepIds_.size(); i++) { + dataSize += computeStringSizeNoTag(prerequisiteStepIds_.getRaw(i)); + } + size += dataSize; + size += 1 * getPrerequisiteStepIdsList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.OrderedJob)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.OrderedJob other = (com.google.cloud.dataproc.v1beta2.OrderedJob) obj; + + boolean result = true; + result = result && getStepId() + .equals(other.getStepId()); + result = result && internalGetLabels().equals( + other.internalGetLabels()); + result = result && (hasScheduling() == other.hasScheduling()); + if (hasScheduling()) { + result = result && getScheduling() + .equals(other.getScheduling()); + } + result = result && getPrerequisiteStepIdsList() + .equals(other.getPrerequisiteStepIdsList()); + result = result && getJobTypeCase().equals( + other.getJobTypeCase()); + if (!result) return false; + switch (jobTypeCase_) { + case 2: + result = result && getHadoopJob() + .equals(other.getHadoopJob()); + break; + case 3: + result = result && getSparkJob() + .equals(other.getSparkJob()); + break; + case 4: + result = result && getPysparkJob() + .equals(other.getPysparkJob()); + break; + case 5: + result = result && getHiveJob() + .equals(other.getHiveJob()); + break; + case 6: + result = result && getPigJob() + .equals(other.getPigJob()); + break; + case 7: + result = result && getSparkSqlJob() + .equals(other.getSparkSqlJob()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + STEP_ID_FIELD_NUMBER; + hash = (53 * hash) + getStepId().hashCode(); + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (hasScheduling()) { + hash = (37 * hash) + SCHEDULING_FIELD_NUMBER; + hash = (53 * hash) + getScheduling().hashCode(); + } + if (getPrerequisiteStepIdsCount() > 0) { + hash = (37 * hash) + PREREQUISITE_STEP_IDS_FIELD_NUMBER; + hash = (53 * hash) + getPrerequisiteStepIdsList().hashCode(); + } + switch (jobTypeCase_) { + case 2: + hash = (37 * hash) + HADOOP_JOB_FIELD_NUMBER; + hash = (53 * hash) + getHadoopJob().hashCode(); + break; + case 3: + hash = (37 * hash) + SPARK_JOB_FIELD_NUMBER; + hash = (53 * hash) + getSparkJob().hashCode(); + break; + case 4: + hash = (37 * hash) + PYSPARK_JOB_FIELD_NUMBER; + hash = (53 * hash) + getPysparkJob().hashCode(); + break; + case 5: + hash = (37 * hash) + HIVE_JOB_FIELD_NUMBER; + hash = (53 * hash) + getHiveJob().hashCode(); + break; + case 6: + hash = (37 * hash) + PIG_JOB_FIELD_NUMBER; + hash = (53 * hash) + getPigJob().hashCode(); + break; + case 7: + hash = (37 * hash) + SPARK_SQL_JOB_FIELD_NUMBER; + hash = (53 * hash) + getSparkSqlJob().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); 
+ } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.OrderedJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.OrderedJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A job executed by the workflow.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.OrderedJob} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.OrderedJob) + com.google.cloud.dataproc.v1beta2.OrderedJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_OrderedJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 8: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 8: + return internalGetMutableLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_OrderedJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.OrderedJob.class, com.google.cloud.dataproc.v1beta2.OrderedJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.OrderedJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + stepId_ = ""; + + internalGetMutableLabels().clear(); + if (schedulingBuilder_ == null) { + scheduling_ = null; + } else { + scheduling_ = null; + schedulingBuilder_ = null; + } + prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000200); + jobTypeCase_ = 0; + jobType_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_OrderedJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.OrderedJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.OrderedJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.OrderedJob build() { + com.google.cloud.dataproc.v1beta2.OrderedJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.OrderedJob buildPartial() { + com.google.cloud.dataproc.v1beta2.OrderedJob result = new com.google.cloud.dataproc.v1beta2.OrderedJob(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.stepId_ = stepId_; + if (jobTypeCase_ == 2) { + if (hadoopJobBuilder_ == null) { + result.jobType_ = jobType_; + } else { + result.jobType_ = hadoopJobBuilder_.build(); + } + } + if (jobTypeCase_ == 3) { + if 
(sparkJobBuilder_ == null) { + result.jobType_ = jobType_; + } else { + result.jobType_ = sparkJobBuilder_.build(); + } + } + if (jobTypeCase_ == 4) { + if (pysparkJobBuilder_ == null) { + result.jobType_ = jobType_; + } else { + result.jobType_ = pysparkJobBuilder_.build(); + } + } + if (jobTypeCase_ == 5) { + if (hiveJobBuilder_ == null) { + result.jobType_ = jobType_; + } else { + result.jobType_ = hiveJobBuilder_.build(); + } + } + if (jobTypeCase_ == 6) { + if (pigJobBuilder_ == null) { + result.jobType_ = jobType_; + } else { + result.jobType_ = pigJobBuilder_.build(); + } + } + if (jobTypeCase_ == 7) { + if (sparkSqlJobBuilder_ == null) { + result.jobType_ = jobType_; + } else { + result.jobType_ = sparkSqlJobBuilder_.build(); + } + } + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + if (schedulingBuilder_ == null) { + result.scheduling_ = scheduling_; + } else { + result.scheduling_ = schedulingBuilder_.build(); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + prerequisiteStepIds_ = prerequisiteStepIds_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000200); + } + result.prerequisiteStepIds_ = prerequisiteStepIds_; + result.bitField0_ = to_bitField0_; + result.jobTypeCase_ = jobTypeCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.OrderedJob) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.OrderedJob)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.OrderedJob other) { + if (other == com.google.cloud.dataproc.v1beta2.OrderedJob.getDefaultInstance()) return this; + if (!other.getStepId().isEmpty()) { + stepId_ = other.stepId_; + onChanged(); + } + internalGetMutableLabels().mergeFrom( + other.internalGetLabels()); + if (other.hasScheduling()) { + mergeScheduling(other.getScheduling()); + } + if (!other.prerequisiteStepIds_.isEmpty()) { + if (prerequisiteStepIds_.isEmpty()) { + prerequisiteStepIds_ = other.prerequisiteStepIds_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.addAll(other.prerequisiteStepIds_); + } + onChanged(); + } + switch (other.getJobTypeCase()) { + case HADOOP_JOB: { + mergeHadoopJob(other.getHadoopJob()); + break; + } + case SPARK_JOB: { + mergeSparkJob(other.getSparkJob()); + break; + } + case PYSPARK_JOB: { + 
mergePysparkJob(other.getPysparkJob()); + break; + } + case HIVE_JOB: { + mergeHiveJob(other.getHiveJob()); + break; + } + case PIG_JOB: { + mergePigJob(other.getPigJob()); + break; + } + case SPARK_SQL_JOB: { + mergeSparkSqlJob(other.getSparkSqlJob()); + break; + } + case JOBTYPE_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.OrderedJob parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.OrderedJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int jobTypeCase_ = 0; + private java.lang.Object jobType_; + public JobTypeCase + getJobTypeCase() { + return JobTypeCase.forNumber( + jobTypeCase_); + } + + public Builder clearJobType() { + jobTypeCase_ = 0; + jobType_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object stepId_ = ""; + /** + *
+     * Required. The step id. The id must be unique among all jobs
+     * within the template.
+     * The step id is used as a prefix for the job id, as the job
+     * `goog-dataproc-workflow-step-id` label, and in the
+     * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field of other
+     * steps.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). It cannot begin or end with an
+     * underscore or hyphen, and must consist of between 3 and 50 characters.
+     * 
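+     *
+     * A minimal usage sketch (the step id value is hypothetical, chosen to
+     * satisfy the constraints above):
+     *
+     *   OrderedJob.Builder builder = OrderedJob.newBuilder()
+     *       .setStepId("prepare-data");  // 3-50 chars; letters, digits, _ and -; no leading/trailing _ or -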
+ * + * string step_id = 1; + */ + public java.lang.String getStepId() { + java.lang.Object ref = stepId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stepId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The step id. The id must be unique among all jobs
+     * within the template.
+     * The step id is used as a prefix for the job id, as the job
+     * `goog-dataproc-workflow-step-id` label, and in the
+     * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field of other
+     * steps.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). It cannot begin or end with an
+     * underscore or hyphen, and must consist of between 3 and 50 characters.
+     * 
+ * + * string step_id = 1; + */ + public com.google.protobuf.ByteString + getStepIdBytes() { + java.lang.Object ref = stepId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + stepId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The step id. The id must be unique among all jobs
+     * within the template.
+     * The step id is used as a prefix for the job id, as the job
+     * `goog-dataproc-workflow-step-id` label, and in the
+     * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field of other
+     * steps.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). It cannot begin or end with an
+     * underscore or hyphen, and must consist of between 3 and 50 characters.
+     * 
+ * + * string step_id = 1; + */ + public Builder setStepId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + stepId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The step id. The id must be unique among all jobs
+     * within the template.
+     * The step id is used as a prefix for the job id, as the job
+     * `goog-dataproc-workflow-step-id` label, and in the
+     * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field of other
+     * steps.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). It cannot begin or end with an
+     * underscore or hyphen, and must consist of between 3 and 50 characters.
+     * 
+ * + * string step_id = 1; + */ + public Builder clearStepId() { + + stepId_ = getDefaultInstance().getStepId(); + onChanged(); + return this; + } + /** + *
+     * Required. The step id. The id must be unique among all jobs
+     * within the template.
+     * The step id is used as a prefix for the job id, as the job
+     * `goog-dataproc-workflow-step-id` label, and in the
+     * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field of other
+     * steps.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). It cannot begin or end with an
+     * underscore or hyphen, and must consist of between 3 and 50 characters.
+     * 
+ * + * string step_id = 1; + */ + public Builder setStepIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + stepId_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HadoopJob, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder, com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder> hadoopJobBuilder_; + /** + *
+     * Job is a Hadoop job.
+     * 
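+     *
+     * Exactly one job type (hadoop_job, spark_job, pyspark_job, hive_job,
+     * pig_job, or spark_sql_job) can be set per step; setting a different type
+     * replaces the previous choice. A minimal sketch with a hypothetical jar URI:
+     *
+     *   OrderedJob job = OrderedJob.newBuilder()
+     *       .setStepId("wordcount")
+     *       .setHadoopJob(HadoopJob.newBuilder()
+     *           .setMainJarFileUri("gs://my-bucket/wordcount.jar")  // hypothetical location
+     *           .build())
+     *       .build();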
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public boolean hasHadoopJob() { + return jobTypeCase_ == 2; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJob getHadoopJob() { + if (hadoopJobBuilder_ == null) { + if (jobTypeCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } else { + if (jobTypeCase_ == 2) { + return hadoopJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public Builder setHadoopJob(com.google.cloud.dataproc.v1beta2.HadoopJob value) { + if (hadoopJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + jobType_ = value; + onChanged(); + } else { + hadoopJobBuilder_.setMessage(value); + } + jobTypeCase_ = 2; + return this; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public Builder setHadoopJob( + com.google.cloud.dataproc.v1beta2.HadoopJob.Builder builderForValue) { + if (hadoopJobBuilder_ == null) { + jobType_ = builderForValue.build(); + onChanged(); + } else { + hadoopJobBuilder_.setMessage(builderForValue.build()); + } + jobTypeCase_ = 2; + return this; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public Builder mergeHadoopJob(com.google.cloud.dataproc.v1beta2.HadoopJob value) { + if (hadoopJobBuilder_ == null) { + if (jobTypeCase_ == 2 && + jobType_ != com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance()) { + jobType_ = com.google.cloud.dataproc.v1beta2.HadoopJob.newBuilder((com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_) + .mergeFrom(value).buildPartial(); + } else { + jobType_ = value; + } + onChanged(); + } else { + if (jobTypeCase_ == 2) { + hadoopJobBuilder_.mergeFrom(value); + } + hadoopJobBuilder_.setMessage(value); + } + jobTypeCase_ = 2; + return this; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public Builder clearHadoopJob() { + if (hadoopJobBuilder_ == null) { + if (jobTypeCase_ == 2) { + jobTypeCase_ = 0; + jobType_ = null; + onChanged(); + } + } else { + if (jobTypeCase_ == 2) { + jobTypeCase_ = 0; + jobType_ = null; + } + hadoopJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJob.Builder getHadoopJobBuilder() { + return getHadoopJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + public com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder getHadoopJobOrBuilder() { + if ((jobTypeCase_ == 2) && (hadoopJobBuilder_ != null)) { + return hadoopJobBuilder_.getMessageOrBuilder(); + } else { + if (jobTypeCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hadoop job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HadoopJob, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder, com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder> + getHadoopJobFieldBuilder() { + if (hadoopJobBuilder_ == null) { + if (!(jobTypeCase_ == 2)) { + jobType_ = com.google.cloud.dataproc.v1beta2.HadoopJob.getDefaultInstance(); + } + hadoopJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HadoopJob, com.google.cloud.dataproc.v1beta2.HadoopJob.Builder, com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.HadoopJob) jobType_, + getParentForChildren(), + isClean()); + jobType_ = null; + } + jobTypeCase_ = 2; + onChanged();; + return hadoopJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkJob, com.google.cloud.dataproc.v1beta2.SparkJob.Builder, com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder> sparkJobBuilder_; + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public boolean hasSparkJob() { + return jobTypeCase_ == 3; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.SparkJob getSparkJob() { + if (sparkJobBuilder_ == null) { + if (jobTypeCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } else { + if (jobTypeCase_ == 3) { + return sparkJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public Builder setSparkJob(com.google.cloud.dataproc.v1beta2.SparkJob value) { + if (sparkJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + jobType_ = value; + onChanged(); + } else { + sparkJobBuilder_.setMessage(value); + } + jobTypeCase_ = 3; + return this; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public Builder setSparkJob( + com.google.cloud.dataproc.v1beta2.SparkJob.Builder builderForValue) { + if (sparkJobBuilder_ == null) { + jobType_ = builderForValue.build(); + onChanged(); + } else { + sparkJobBuilder_.setMessage(builderForValue.build()); + } + jobTypeCase_ = 3; + return this; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public Builder mergeSparkJob(com.google.cloud.dataproc.v1beta2.SparkJob value) { + if (sparkJobBuilder_ == null) { + if (jobTypeCase_ == 3 && + jobType_ != com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance()) { + jobType_ = com.google.cloud.dataproc.v1beta2.SparkJob.newBuilder((com.google.cloud.dataproc.v1beta2.SparkJob) jobType_) + .mergeFrom(value).buildPartial(); + } else { + jobType_ = value; + } + onChanged(); + } else { + if (jobTypeCase_ == 3) { + sparkJobBuilder_.mergeFrom(value); + } + sparkJobBuilder_.setMessage(value); + } + jobTypeCase_ = 3; + return this; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public Builder clearSparkJob() { + if (sparkJobBuilder_ == null) { + if (jobTypeCase_ == 3) { + jobTypeCase_ = 0; + jobType_ = null; + onChanged(); + } + } else { + if (jobTypeCase_ == 3) { + jobTypeCase_ = 0; + jobType_ = null; + } + sparkJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.SparkJob.Builder getSparkJobBuilder() { + return getSparkJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + public com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder getSparkJobOrBuilder() { + if ((jobTypeCase_ == 3) && (sparkJobBuilder_ != null)) { + return sparkJobBuilder_.getMessageOrBuilder(); + } else { + if (jobTypeCase_ == 3) { + return (com.google.cloud.dataproc.v1beta2.SparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Spark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkJob, com.google.cloud.dataproc.v1beta2.SparkJob.Builder, com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder> + getSparkJobFieldBuilder() { + if (sparkJobBuilder_ == null) { + if (!(jobTypeCase_ == 3)) { + jobType_ = com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + sparkJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkJob, com.google.cloud.dataproc.v1beta2.SparkJob.Builder, com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.SparkJob) jobType_, + getParentForChildren(), + isClean()); + jobType_ = null; + } + jobTypeCase_ = 3; + onChanged();; + return sparkJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PySparkJob, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder, com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder> pysparkJobBuilder_; + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public boolean hasPysparkJob() { + return jobTypeCase_ == 4; + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJob getPysparkJob() { + if (pysparkJobBuilder_ == null) { + if (jobTypeCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } else { + if (jobTypeCase_ == 4) { + return pysparkJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public Builder setPysparkJob(com.google.cloud.dataproc.v1beta2.PySparkJob value) { + if (pysparkJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + jobType_ = value; + onChanged(); + } else { + pysparkJobBuilder_.setMessage(value); + } + jobTypeCase_ = 4; + return this; + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public Builder setPysparkJob( + com.google.cloud.dataproc.v1beta2.PySparkJob.Builder builderForValue) { + if (pysparkJobBuilder_ == null) { + jobType_ = builderForValue.build(); + onChanged(); + } else { + pysparkJobBuilder_.setMessage(builderForValue.build()); + } + jobTypeCase_ = 4; + return this; + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public Builder mergePysparkJob(com.google.cloud.dataproc.v1beta2.PySparkJob value) { + if (pysparkJobBuilder_ == null) { + if (jobTypeCase_ == 4 && + jobType_ != com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance()) { + jobType_ = com.google.cloud.dataproc.v1beta2.PySparkJob.newBuilder((com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_) + .mergeFrom(value).buildPartial(); + } else { + jobType_ = value; + } + onChanged(); + } else { + if (jobTypeCase_ == 4) { + pysparkJobBuilder_.mergeFrom(value); + } + pysparkJobBuilder_.setMessage(value); + } + jobTypeCase_ = 4; + return this; + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public Builder clearPysparkJob() { + if (pysparkJobBuilder_ == null) { + if (jobTypeCase_ == 4) { + jobTypeCase_ = 0; + jobType_ = null; + onChanged(); + } + } else { + if (jobTypeCase_ == 4) { + jobTypeCase_ = 0; + jobType_ = null; + } + pysparkJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJob.Builder getPysparkJobBuilder() { + return getPysparkJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + public com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder getPysparkJobOrBuilder() { + if ((jobTypeCase_ == 4) && (pysparkJobBuilder_ != null)) { + return pysparkJobBuilder_.getMessageOrBuilder(); + } else { + if (jobTypeCase_ == 4) { + return (com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a PySpark job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PySparkJob, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder, com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder> + getPysparkJobFieldBuilder() { + if (pysparkJobBuilder_ == null) { + if (!(jobTypeCase_ == 4)) { + jobType_ = com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + pysparkJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PySparkJob, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder, com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.PySparkJob) jobType_, + getParentForChildren(), + isClean()); + jobType_ = null; + } + jobTypeCase_ = 4; + onChanged();; + return pysparkJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HiveJob, com.google.cloud.dataproc.v1beta2.HiveJob.Builder, com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder> hiveJobBuilder_; + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public boolean hasHiveJob() { + return jobTypeCase_ == 5; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.HiveJob getHiveJob() { + if (hiveJobBuilder_ == null) { + if (jobTypeCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } else { + if (jobTypeCase_ == 5) { + return hiveJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public Builder setHiveJob(com.google.cloud.dataproc.v1beta2.HiveJob value) { + if (hiveJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + jobType_ = value; + onChanged(); + } else { + hiveJobBuilder_.setMessage(value); + } + jobTypeCase_ = 5; + return this; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public Builder setHiveJob( + com.google.cloud.dataproc.v1beta2.HiveJob.Builder builderForValue) { + if (hiveJobBuilder_ == null) { + jobType_ = builderForValue.build(); + onChanged(); + } else { + hiveJobBuilder_.setMessage(builderForValue.build()); + } + jobTypeCase_ = 5; + return this; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public Builder mergeHiveJob(com.google.cloud.dataproc.v1beta2.HiveJob value) { + if (hiveJobBuilder_ == null) { + if (jobTypeCase_ == 5 && + jobType_ != com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance()) { + jobType_ = com.google.cloud.dataproc.v1beta2.HiveJob.newBuilder((com.google.cloud.dataproc.v1beta2.HiveJob) jobType_) + .mergeFrom(value).buildPartial(); + } else { + jobType_ = value; + } + onChanged(); + } else { + if (jobTypeCase_ == 5) { + hiveJobBuilder_.mergeFrom(value); + } + hiveJobBuilder_.setMessage(value); + } + jobTypeCase_ = 5; + return this; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public Builder clearHiveJob() { + if (hiveJobBuilder_ == null) { + if (jobTypeCase_ == 5) { + jobTypeCase_ = 0; + jobType_ = null; + onChanged(); + } + } else { + if (jobTypeCase_ == 5) { + jobTypeCase_ = 0; + jobType_ = null; + } + hiveJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.HiveJob.Builder getHiveJobBuilder() { + return getHiveJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + public com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder getHiveJobOrBuilder() { + if ((jobTypeCase_ == 5) && (hiveJobBuilder_ != null)) { + return hiveJobBuilder_.getMessageOrBuilder(); + } else { + if (jobTypeCase_ == 5) { + return (com.google.cloud.dataproc.v1beta2.HiveJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Hive job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HiveJob, com.google.cloud.dataproc.v1beta2.HiveJob.Builder, com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder> + getHiveJobFieldBuilder() { + if (hiveJobBuilder_ == null) { + if (!(jobTypeCase_ == 5)) { + jobType_ = com.google.cloud.dataproc.v1beta2.HiveJob.getDefaultInstance(); + } + hiveJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.HiveJob, com.google.cloud.dataproc.v1beta2.HiveJob.Builder, com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.HiveJob) jobType_, + getParentForChildren(), + isClean()); + jobType_ = null; + } + jobTypeCase_ = 5; + onChanged();; + return hiveJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PigJob, com.google.cloud.dataproc.v1beta2.PigJob.Builder, com.google.cloud.dataproc.v1beta2.PigJobOrBuilder> pigJobBuilder_; + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public boolean hasPigJob() { + return jobTypeCase_ == 6; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.PigJob getPigJob() { + if (pigJobBuilder_ == null) { + if (jobTypeCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.PigJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } else { + if (jobTypeCase_ == 6) { + return pigJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public Builder setPigJob(com.google.cloud.dataproc.v1beta2.PigJob value) { + if (pigJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + jobType_ = value; + onChanged(); + } else { + pigJobBuilder_.setMessage(value); + } + jobTypeCase_ = 6; + return this; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public Builder setPigJob( + com.google.cloud.dataproc.v1beta2.PigJob.Builder builderForValue) { + if (pigJobBuilder_ == null) { + jobType_ = builderForValue.build(); + onChanged(); + } else { + pigJobBuilder_.setMessage(builderForValue.build()); + } + jobTypeCase_ = 6; + return this; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public Builder mergePigJob(com.google.cloud.dataproc.v1beta2.PigJob value) { + if (pigJobBuilder_ == null) { + if (jobTypeCase_ == 6 && + jobType_ != com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance()) { + jobType_ = com.google.cloud.dataproc.v1beta2.PigJob.newBuilder((com.google.cloud.dataproc.v1beta2.PigJob) jobType_) + .mergeFrom(value).buildPartial(); + } else { + jobType_ = value; + } + onChanged(); + } else { + if (jobTypeCase_ == 6) { + pigJobBuilder_.mergeFrom(value); + } + pigJobBuilder_.setMessage(value); + } + jobTypeCase_ = 6; + return this; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public Builder clearPigJob() { + if (pigJobBuilder_ == null) { + if (jobTypeCase_ == 6) { + jobTypeCase_ = 0; + jobType_ = null; + onChanged(); + } + } else { + if (jobTypeCase_ == 6) { + jobTypeCase_ = 0; + jobType_ = null; + } + pigJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.PigJob.Builder getPigJobBuilder() { + return getPigJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + public com.google.cloud.dataproc.v1beta2.PigJobOrBuilder getPigJobOrBuilder() { + if ((jobTypeCase_ == 6) && (pigJobBuilder_ != null)) { + return pigJobBuilder_.getMessageOrBuilder(); + } else { + if (jobTypeCase_ == 6) { + return (com.google.cloud.dataproc.v1beta2.PigJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Pig job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PigJob, com.google.cloud.dataproc.v1beta2.PigJob.Builder, com.google.cloud.dataproc.v1beta2.PigJobOrBuilder> + getPigJobFieldBuilder() { + if (pigJobBuilder_ == null) { + if (!(jobTypeCase_ == 6)) { + jobType_ = com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + pigJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.PigJob, com.google.cloud.dataproc.v1beta2.PigJob.Builder, com.google.cloud.dataproc.v1beta2.PigJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.PigJob) jobType_, + getParentForChildren(), + isClean()); + jobType_ = null; + } + jobTypeCase_ = 6; + onChanged();; + return pigJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkSqlJob, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder, com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder> sparkSqlJobBuilder_; + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public boolean hasSparkSqlJob() { + return jobTypeCase_ == 7; + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJob getSparkSqlJob() { + if (sparkSqlJobBuilder_ == null) { + if (jobTypeCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } else { + if (jobTypeCase_ == 7) { + return sparkSqlJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public Builder setSparkSqlJob(com.google.cloud.dataproc.v1beta2.SparkSqlJob value) { + if (sparkSqlJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + jobType_ = value; + onChanged(); + } else { + sparkSqlJobBuilder_.setMessage(value); + } + jobTypeCase_ = 7; + return this; + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public Builder setSparkSqlJob( + com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder builderForValue) { + if (sparkSqlJobBuilder_ == null) { + jobType_ = builderForValue.build(); + onChanged(); + } else { + sparkSqlJobBuilder_.setMessage(builderForValue.build()); + } + jobTypeCase_ = 7; + return this; + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1beta2.SparkSqlJob value) { + if (sparkSqlJobBuilder_ == null) { + if (jobTypeCase_ == 7 && + jobType_ != com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance()) { + jobType_ = com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder((com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_) + .mergeFrom(value).buildPartial(); + } else { + jobType_ = value; + } + onChanged(); + } else { + if (jobTypeCase_ == 7) { + sparkSqlJobBuilder_.mergeFrom(value); + } + sparkSqlJobBuilder_.setMessage(value); + } + jobTypeCase_ = 7; + return this; + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public Builder clearSparkSqlJob() { + if (sparkSqlJobBuilder_ == null) { + if (jobTypeCase_ == 7) { + jobTypeCase_ = 0; + jobType_ = null; + onChanged(); + } + } else { + if (jobTypeCase_ == 7) { + jobTypeCase_ = 0; + jobType_ = null; + } + sparkSqlJobBuilder_.clear(); + } + return this; + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder getSparkSqlJobBuilder() { + return getSparkSqlJobFieldBuilder().getBuilder(); + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + public com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { + if ((jobTypeCase_ == 7) && (sparkSqlJobBuilder_ != null)) { + return sparkSqlJobBuilder_.getMessageOrBuilder(); + } else { + if (jobTypeCase_ == 7) { + return (com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_; + } + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + } + /** + *
+     * Job is a Spark SQL job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkSqlJob, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder, com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder> + getSparkSqlJobFieldBuilder() { + if (sparkSqlJobBuilder_ == null) { + if (!(jobTypeCase_ == 7)) { + jobType_ = com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + sparkSqlJobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.SparkSqlJob, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder, com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder>( + (com.google.cloud.dataproc.v1beta2.SparkSqlJob) jobType_, + getParentForChildren(), + isClean()); + jobType_ = null; + } + jobTypeCase_ = 7; + onChanged();; + return sparkSqlJobBuilder_; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + private com.google.protobuf.MapField + internalGetMutableLabels() { + onChanged();; + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given job.
+     * 
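+     *
+     * For example, a key/value pair (hypothetical) that satisfies both patterns:
+     *
+     *   builder.putLabels("env", "staging");  // lowercase letters, digits, _ and - only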
+ * + * map<string, string> labels = 8; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given job.
+     * 
+ * + * map<string, string> labels = 8; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given job.
+     * 
+ * + * map<string, string> labels = 8; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given job.
+     * 
+ * + * map<string, string> labels = 8; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + internalGetMutableLabels().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given job.
+     * 
+ * + * map<string, string> labels = 8; + */ + + public Builder removeLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableLabels() { + return internalGetMutableLabels().getMutableMap(); + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given job.
+     * 
+ * + * map<string, string> labels = 8; + */ + public Builder putLabels( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. The labels to associate with this job.
+     * Label keys must be between 1 and 63 characters long, and must conform to
+     * the following regular expression:
+     * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+     * Label values must be between 1 and 63 characters long, and must conform to
+     * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+     * No more than 32 labels can be associated with a given job.
+     * 
+ * + * map<string, string> labels = 8; + */ + + public Builder putAllLabels( + java.util.Map values) { + internalGetMutableLabels().getMutableMap() + .putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1beta2.JobScheduling scheduling_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobScheduling, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder, com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder> schedulingBuilder_; + /** + *
+     * Optional. Job scheduling configuration.
+     * 
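+     *
+     * A minimal sketch, assuming the JobScheduling message exposes a
+     * max_failures_per_hour field as in other Dataproc API versions:
+     *
+     *   builder.setScheduling(JobScheduling.newBuilder()
+     *       .setMaxFailuresPerHour(5)  // hypothetical retry budget
+     *       .build());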
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public boolean hasScheduling() { + return schedulingBuilder_ != null || scheduling_ != null; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public com.google.cloud.dataproc.v1beta2.JobScheduling getScheduling() { + if (schedulingBuilder_ == null) { + return scheduling_ == null ? com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance() : scheduling_; + } else { + return schedulingBuilder_.getMessage(); + } + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public Builder setScheduling(com.google.cloud.dataproc.v1beta2.JobScheduling value) { + if (schedulingBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + scheduling_ = value; + onChanged(); + } else { + schedulingBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public Builder setScheduling( + com.google.cloud.dataproc.v1beta2.JobScheduling.Builder builderForValue) { + if (schedulingBuilder_ == null) { + scheduling_ = builderForValue.build(); + onChanged(); + } else { + schedulingBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public Builder mergeScheduling(com.google.cloud.dataproc.v1beta2.JobScheduling value) { + if (schedulingBuilder_ == null) { + if (scheduling_ != null) { + scheduling_ = + com.google.cloud.dataproc.v1beta2.JobScheduling.newBuilder(scheduling_).mergeFrom(value).buildPartial(); + } else { + scheduling_ = value; + } + onChanged(); + } else { + schedulingBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public Builder clearScheduling() { + if (schedulingBuilder_ == null) { + scheduling_ = null; + onChanged(); + } else { + scheduling_ = null; + schedulingBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public com.google.cloud.dataproc.v1beta2.JobScheduling.Builder getSchedulingBuilder() { + + onChanged(); + return getSchedulingFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + public com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder getSchedulingOrBuilder() { + if (schedulingBuilder_ != null) { + return schedulingBuilder_.getMessageOrBuilder(); + } else { + return scheduling_ == null ? + com.google.cloud.dataproc.v1beta2.JobScheduling.getDefaultInstance() : scheduling_; + } + } + /** + *
+     * Optional. Job scheduling configuration.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobScheduling, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder, com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder> + getSchedulingFieldBuilder() { + if (schedulingBuilder_ == null) { + schedulingBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.JobScheduling, com.google.cloud.dataproc.v1beta2.JobScheduling.Builder, com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder>( + getScheduling(), + getParentForChildren(), + isClean()); + scheduling_ = null; + } + return schedulingBuilder_; + } + + private com.google.protobuf.LazyStringList prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensurePrerequisiteStepIdsIsMutable() { + if (!((bitField0_ & 0x00000200) == 0x00000200)) { + prerequisiteStepIds_ = new com.google.protobuf.LazyStringArrayList(prerequisiteStepIds_); + bitField0_ |= 0x00000200; + } + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
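+     *
+     * For example, a step that runs only after two earlier steps (ids are
+     * hypothetical) have completed:
+     *
+     *   builder.addPrerequisiteStepIds("ingest")
+     *          .addPrerequisiteStepIds("validate");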
+ * + * repeated string prerequisite_step_ids = 10; + */ + public com.google.protobuf.ProtocolStringList + getPrerequisiteStepIdsList() { + return prerequisiteStepIds_.getUnmodifiableView(); + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public int getPrerequisiteStepIdsCount() { + return prerequisiteStepIds_.size(); + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public java.lang.String getPrerequisiteStepIds(int index) { + return prerequisiteStepIds_.get(index); + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public com.google.protobuf.ByteString + getPrerequisiteStepIdsBytes(int index) { + return prerequisiteStepIds_.getByteString(index); + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public Builder setPrerequisiteStepIds( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public Builder addPrerequisiteStepIds( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public Builder addAllPrerequisiteStepIds( + java.lang.Iterable values) { + ensurePrerequisiteStepIdsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, prerequisiteStepIds_); + onChanged(); + return this; + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public Builder clearPrerequisiteStepIds() { + prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + return this; + } + /** + *
+     * Optional. The list of prerequisite job step_ids.
+     * If not specified, the job will start at the beginning of the workflow.
+     * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + public Builder addPrerequisiteStepIdsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.add(value); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.OrderedJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.OrderedJob) + private static final com.google.cloud.dataproc.v1beta2.OrderedJob DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.OrderedJob(); + } + + public static com.google.cloud.dataproc.v1beta2.OrderedJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OrderedJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OrderedJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.OrderedJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java new file mode 100644 index 000000000000..432e05ae0f07 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java @@ -0,0 +1,343 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface OrderedJobOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.OrderedJob) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The step id. The id must be unique among all jobs
+   * within the template.
+   * The step id is used as a prefix for the job id, as the job
+   * `goog-dataproc-workflow-step-id` label, and in the
+   * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field of other
+   * steps.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). It cannot begin or end with an
+   * underscore or hyphen, and must consist of between 3 and 50 characters.
+   * 
+ * + * string step_id = 1; + */ + java.lang.String getStepId(); + /** + *
+   * Required. The step id. The id must be unique among all jobs
+   * within the template.
+   * The step id is used as a prefix for the job id, as the job
+   * `goog-dataproc-workflow-step-id` label, and in the
+   * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field of other
+   * steps.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). It cannot begin or end with an
+   * underscore or hyphen, and must consist of between 3 and 50 characters.
+   * 
+ * + * string step_id = 1; + */ + com.google.protobuf.ByteString + getStepIdBytes(); + + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + boolean hasHadoopJob(); + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + com.google.cloud.dataproc.v1beta2.HadoopJob getHadoopJob(); + /** + *
+   * Job is a Hadoop job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HadoopJob hadoop_job = 2; + */ + com.google.cloud.dataproc.v1beta2.HadoopJobOrBuilder getHadoopJobOrBuilder(); + + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + boolean hasSparkJob(); + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + com.google.cloud.dataproc.v1beta2.SparkJob getSparkJob(); + /** + *
+   * Job is a Spark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkJob spark_job = 3; + */ + com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder getSparkJobOrBuilder(); + + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + boolean hasPysparkJob(); + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + com.google.cloud.dataproc.v1beta2.PySparkJob getPysparkJob(); + /** + *
+   * Job is a PySpark job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PySparkJob pyspark_job = 4; + */ + com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder getPysparkJobOrBuilder(); + + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + boolean hasHiveJob(); + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + com.google.cloud.dataproc.v1beta2.HiveJob getHiveJob(); + /** + *
+   * Job is a Hive job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.HiveJob hive_job = 5; + */ + com.google.cloud.dataproc.v1beta2.HiveJobOrBuilder getHiveJobOrBuilder(); + + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + boolean hasPigJob(); + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + com.google.cloud.dataproc.v1beta2.PigJob getPigJob(); + /** + *
+   * Job is a Pig job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.PigJob pig_job = 6; + */ + com.google.cloud.dataproc.v1beta2.PigJobOrBuilder getPigJobOrBuilder(); + + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + boolean hasSparkSqlJob(); + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + com.google.cloud.dataproc.v1beta2.SparkSqlJob getSparkSqlJob(); + /** + *
+   * Job is a SparkSql job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.SparkSqlJob spark_sql_job = 7; + */ + com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder(); + + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
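+   * For example (illustrative only), a key of `env` with a value of
+   * `staging` satisfies both expressions.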
+   * 
+ * + * map<string, string> labels = 8; + */ + int getLabelsCount(); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
+ * + * map<string, string> labels = 8; + */ + boolean containsLabels( + java.lang.String key); + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getLabels(); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
+ * + * map<string, string> labels = 8; + */ + java.util.Map + getLabelsMap(); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
+ * + * map<string, string> labels = 8; + */ + + java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. The labels to associate with this job.
+   * Label keys must be between 1 and 63 characters long, and must conform to
+   * the following regular expression:
+   * [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+   * Label values must be between 1 and 63 characters long, and must conform to
+   * the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+   * No more than 32 labels can be associated with a given job.
+   * 
+ * + * map<string, string> labels = 8; + */ + + java.lang.String getLabelsOrThrow( + java.lang.String key); + + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + boolean hasScheduling(); + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + com.google.cloud.dataproc.v1beta2.JobScheduling getScheduling(); + /** + *
+   * Optional. Job scheduling configuration.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.JobScheduling scheduling = 9; + */ + com.google.cloud.dataproc.v1beta2.JobSchedulingOrBuilder getSchedulingOrBuilder(); + + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
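+   * For example (hypothetical step ids), a step declaring
+   * `prerequisite_step_ids = ["ingest", "clean"]` runs only after the
+   * `ingest` and `clean` steps have completed.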
+   * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + java.util.List + getPrerequisiteStepIdsList(); + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
+   * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + int getPrerequisiteStepIdsCount(); + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
+   * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + java.lang.String getPrerequisiteStepIds(int index); + /** + *
+   * Optional. The list of prerequisite job step_ids.
+   * If not specified, the job will start at the beginning of the workflow.
+   * 
+ * + * repeated string prerequisite_step_ids = 10; + */ + com.google.protobuf.ByteString + getPrerequisiteStepIdsBytes(int index); + + public com.google.cloud.dataproc.v1beta2.OrderedJob.JobTypeCase getJobTypeCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java new file mode 100644 index 000000000000..d22a74871c75 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java @@ -0,0 +1,2128 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
+ * queries on YARN.
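+ *
+ * A minimal construction sketch (bucket and script paths are hypothetical):
+ *
+ *   PigJob pigJob = PigJob.newBuilder()
+ *       .setQueryFileUri("gs://my-bucket/scripts/report.pig")
+ *       .putScriptVariables("date", "2018-08-01")
+ *       .build();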
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.PigJob} + */ +public final class PigJob extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.PigJob) + PigJobOrBuilder { +private static final long serialVersionUID = 0L; + // Use PigJob.newBuilder() to construct. + private PigJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PigJob() { + continueOnFailure_ = false; + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PigJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + queriesCase_ = 1; + queries_ = s; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.QueryList.Builder subBuilder = null; + if (queriesCase_ == 2) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.QueryList) queries_).toBuilder(); + } + queries_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.QueryList.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.QueryList) queries_); + queries_ = subBuilder.buildPartial(); + } + queriesCase_ = 2; + break; + } + case 24: { + + continueOnFailure_ = input.readBool(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + scriptVariables_ = com.google.protobuf.MapField.newMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000008; + } + com.google.protobuf.MapEntry + scriptVariables__ = input.readMessage( + ScriptVariablesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + scriptVariables_.getMutableMap().put( + scriptVariables__.getKey(), scriptVariables__.getValue()); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000010; + } + com.google.protobuf.MapEntry + properties__ = input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + properties_.getMutableMap().put( + properties__.getKey(), properties__.getValue()); + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000020; + } + jarFileUris_.add(s); + break; + } + case 58: { + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder subBuilder = null; + if (loggingConfig_ != null) { + subBuilder = loggingConfig_.toBuilder(); + } + loggingConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.LoggingConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loggingConfig_); + loggingConfig_ = 
subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 4: + return internalGetScriptVariables(); + case 5: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PigJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.PigJob.class, com.google.cloud.dataproc.v1beta2.PigJob.Builder.class); + } + + private int bitField0_; + private int queriesCase_ = 0; + private java.lang.Object queries_; + public enum QueriesCase + implements com.google.protobuf.Internal.EnumLite { + QUERY_FILE_URI(1), + QUERY_LIST(2), + QUERIES_NOT_SET(0); + private final int value; + private QueriesCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static QueriesCase valueOf(int value) { + return forNumber(value); + } + + public static QueriesCase forNumber(int value) { + switch (value) { + case 1: return QUERY_FILE_URI; + case 2: return QUERY_LIST; + case 0: return QUERIES_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public QueriesCase + getQueriesCase() { + return QueriesCase.forNumber( + queriesCase_); + } + + public static final int QUERY_FILE_URI_FIELD_NUMBER = 1; + /** + *
+   * The HCFS URI of the script that contains the Pig queries.
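+   * For example (hypothetical bucket): `gs://my-bucket/scripts/report.pig`.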
+   * 
+ * + * string query_file_uri = 1; + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (queriesCase_ == 1) { + queries_ = s; + } + return s; + } + } + /** + *
+   * The HCFS URI of the script that contains the Pig queries.
+   * 
+ * + * string query_file_uri = 1; + */ + public com.google.protobuf.ByteString + getQueryFileUriBytes() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (queriesCase_ == 1) { + queries_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int QUERY_LIST_FIELD_NUMBER = 2; + /** + *
+   * A list of queries.
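+   * Note that `query_file_uri` and `query_list` are members of the `queries`
+   * oneof, so setting one clears the other. A sketch with illustrative
+   * query text:
+   *
+   *   PigJob.Builder builder = PigJob.newBuilder();
+   *   builder.setQueryList(QueryList.newBuilder()
+   *       .addQueries("A = LOAD 'gs://my-bucket/data' AS (line:chararray);")
+   *       .build());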
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public boolean hasQueryList() { + return queriesCase_ == 2; + } + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList getQueryList() { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder() { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + + public static final int CONTINUE_ON_FAILURE_FIELD_NUMBER = 3; + private boolean continueOnFailure_; + /** + *
+   * Optional. Whether to continue executing queries if a query fails.
+   * The default value is `false`. Setting to `true` can be useful when executing
+   * independent parallel queries.
+   * 
+ * + * bool continue_on_failure = 3; + */ + public boolean getContinueOnFailure() { + return continueOnFailure_; + } + + public static final int SCRIPT_VARIABLES_FIELD_NUMBER = 4; + private static final class ScriptVariablesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PigJob_ScriptVariablesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> scriptVariables_; + private com.google.protobuf.MapField + internalGetScriptVariables() { + if (scriptVariables_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + return scriptVariables_; + } + + public int getScriptVariablesCount() { + return internalGetScriptVariables().getMap().size(); + } + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
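+   * For example (hypothetical variable), calling
+   * `putScriptVariables("date", "2018-08-01")` on the builder corresponds
+   * roughly to passing `-param date=2018-08-01` to Pig.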
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + public boolean containsScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetScriptVariables().getMap().containsKey(key); + } + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getScriptVariables() { + return getScriptVariablesMap(); + } + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.util.Map getScriptVariablesMap() { + return internalGetScriptVariables().getMap(); + } + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.lang.String getScriptVariablesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int PROPERTIES_FIELD_NUMBER = 5; + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PigJob_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
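+   * For example (property name and value are illustrative only):
+   * `putProperties("mapreduce.job.queuename", "batch")` on the builder.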
+   * 
+ * + * map<string, string> properties = 5; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
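+   * For example (hypothetical jar):
+   * `addJarFileUris("gs://my-bucket/udfs/pig-udfs.jar")` on the builder.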
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_; + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + public static final int LOGGING_CONFIG_FIELD_NUMBER = 7; + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_; + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public boolean hasLoggingConfig() { + return loggingConfig_ != null; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + return getLoggingConfig(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (queriesCase_ == 1) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, queries_); + } + if (queriesCase_ == 2) { + output.writeMessage(2, (com.google.cloud.dataproc.v1beta2.QueryList) queries_); + } + if (continueOnFailure_ != false) { + output.writeBool(3, continueOnFailure_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetScriptVariables(), + ScriptVariablesDefaultEntryHolder.defaultEntry, + 4); + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetProperties(), + PropertiesDefaultEntryHolder.defaultEntry, + 5); + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, jarFileUris_.getRaw(i)); + } + if (loggingConfig_ != null) { + output.writeMessage(7, getLoggingConfig()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (queriesCase_ == 1) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, queries_); + } + if (queriesCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, (com.google.cloud.dataproc.v1beta2.QueryList) queries_); + } + if (continueOnFailure_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, continueOnFailure_); + } + for (java.util.Map.Entry entry + : internalGetScriptVariables().getMap().entrySet()) { + com.google.protobuf.MapEntry + scriptVariables__ = ScriptVariablesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, scriptVariables__); + } + for (java.util.Map.Entry entry + : internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry + properties__ = PropertiesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, properties__); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getJarFileUrisList().size(); + } + if (loggingConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, getLoggingConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.PigJob)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.PigJob other = (com.google.cloud.dataproc.v1beta2.PigJob) obj; + + boolean result = true; + result = result && 
(getContinueOnFailure() + == other.getContinueOnFailure()); + result = result && internalGetScriptVariables().equals( + other.internalGetScriptVariables()); + result = result && internalGetProperties().equals( + other.internalGetProperties()); + result = result && getJarFileUrisList() + .equals(other.getJarFileUrisList()); + result = result && (hasLoggingConfig() == other.hasLoggingConfig()); + if (hasLoggingConfig()) { + result = result && getLoggingConfig() + .equals(other.getLoggingConfig()); + } + result = result && getQueriesCase().equals( + other.getQueriesCase()); + if (!result) return false; + switch (queriesCase_) { + case 1: + result = result && getQueryFileUri() + .equals(other.getQueryFileUri()); + break; + case 2: + result = result && getQueryList() + .equals(other.getQueryList()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CONTINUE_ON_FAILURE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getContinueOnFailure()); + if (!internalGetScriptVariables().getMap().isEmpty()) { + hash = (37 * hash) + SCRIPT_VARIABLES_FIELD_NUMBER; + hash = (53 * hash) + internalGetScriptVariables().hashCode(); + } + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + if (hasLoggingConfig()) { + hash = (37 * hash) + LOGGING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLoggingConfig().hashCode(); + } + switch (queriesCase_) { + case 1: + hash = (37 * hash) + QUERY_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getQueryFileUri().hashCode(); + break; + case 2: + hash = (37 * hash) + QUERY_LIST_FIELD_NUMBER; + hash = (53 * hash) + getQueryList().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.PigJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.PigJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
+   * queries on YARN.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.PigJob} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.PigJob) + com.google.cloud.dataproc.v1beta2.PigJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 4: + return internalGetScriptVariables(); + case 5: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 4: + return internalGetMutableScriptVariables(); + case 5: + return internalGetMutableProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PigJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.PigJob.class, com.google.cloud.dataproc.v1beta2.PigJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.PigJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + continueOnFailure_ = false; + + internalGetMutableScriptVariables().clear(); + internalGetMutableProperties().clear(); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + queriesCase_ = 0; + queries_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PigJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PigJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PigJob build() { + com.google.cloud.dataproc.v1beta2.PigJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PigJob buildPartial() { + com.google.cloud.dataproc.v1beta2.PigJob result = new com.google.cloud.dataproc.v1beta2.PigJob(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (queriesCase_ == 1) { + result.queries_ = queries_; + } + if (queriesCase_ == 2) { + if (queryListBuilder_ == null) { + result.queries_ = 
queries_; + } else { + result.queries_ = queryListBuilder_.build(); + } + } + result.continueOnFailure_ = continueOnFailure_; + result.scriptVariables_ = internalGetScriptVariables(); + result.scriptVariables_.makeImmutable(); + result.properties_ = internalGetProperties(); + result.properties_.makeImmutable(); + if (((bitField0_ & 0x00000020) == 0x00000020)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.jarFileUris_ = jarFileUris_; + if (loggingConfigBuilder_ == null) { + result.loggingConfig_ = loggingConfig_; + } else { + result.loggingConfig_ = loggingConfigBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + result.queriesCase_ = queriesCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.PigJob) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.PigJob)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.PigJob other) { + if (other == com.google.cloud.dataproc.v1beta2.PigJob.getDefaultInstance()) return this; + if (other.getContinueOnFailure() != false) { + setContinueOnFailure(other.getContinueOnFailure()); + } + internalGetMutableScriptVariables().mergeFrom( + other.internalGetScriptVariables()); + internalGetMutableProperties().mergeFrom( + other.internalGetProperties()); + if (!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + if (other.hasLoggingConfig()) { + mergeLoggingConfig(other.getLoggingConfig()); + } + switch (other.getQueriesCase()) { + case QUERY_FILE_URI: { + queriesCase_ = 1; + queries_ = other.queries_; + onChanged(); + break; + } + case QUERY_LIST: { + mergeQueryList(other.getQueryList()); + break; + } + case QUERIES_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.PigJob parsedMessage = null; + try { + parsedMessage = 
PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.PigJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int queriesCase_ = 0; + private java.lang.Object queries_; + public QueriesCase + getQueriesCase() { + return QueriesCase.forNumber( + queriesCase_); + } + + public Builder clearQueries() { + queriesCase_ = 0; + queries_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + *
+     * The HCFS URI of the script that contains the Pig queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (queriesCase_ == 1) { + queries_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The HCFS URI of the script that contains the Pig queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public com.google.protobuf.ByteString + getQueryFileUriBytes() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (queriesCase_ == 1) { + queries_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The HCFS URI of the script that contains the Pig queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder setQueryFileUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + /** + *
+     * The HCFS URI of the script that contains the Pig queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder clearQueryFileUri() { + if (queriesCase_ == 1) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The HCFS URI of the script that contains the Pig queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder setQueryFileUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder> queryListBuilder_; + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public boolean hasQueryList() { + return queriesCase_ == 2; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList getQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } else { + if (queriesCase_ == 2) { + return queryListBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder setQueryList(com.google.cloud.dataproc.v1beta2.QueryList value) { + if (queryListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queries_ = value; + onChanged(); + } else { + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder setQueryList( + com.google.cloud.dataproc.v1beta2.QueryList.Builder builderForValue) { + if (queryListBuilder_ == null) { + queries_ = builderForValue.build(); + onChanged(); + } else { + queryListBuilder_.setMessage(builderForValue.build()); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder mergeQueryList(com.google.cloud.dataproc.v1beta2.QueryList value) { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2 && + queries_ != com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance()) { + queries_ = com.google.cloud.dataproc.v1beta2.QueryList.newBuilder((com.google.cloud.dataproc.v1beta2.QueryList) queries_) + .mergeFrom(value).buildPartial(); + } else { + queries_ = value; + } + onChanged(); + } else { + if (queriesCase_ == 2) { + queryListBuilder_.mergeFrom(value); + } + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder clearQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + } else { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + } + queryListBuilder_.clear(); + } + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList.Builder getQueryListBuilder() { + return getQueryListFieldBuilder().getBuilder(); + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder() { + if ((queriesCase_ == 2) && (queryListBuilder_ != null)) { + return queryListBuilder_.getMessageOrBuilder(); + } else { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder> + getQueryListFieldBuilder() { + if (queryListBuilder_ == null) { + if (!(queriesCase_ == 2)) { + queries_ = com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + queryListBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder>( + (com.google.cloud.dataproc.v1beta2.QueryList) queries_, + getParentForChildren(), + isClean()); + queries_ = null; + } + queriesCase_ = 2; + onChanged();; + return queryListBuilder_; + } + + private boolean continueOnFailure_ ; + /** + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when executing
+     * independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3; + */ + public boolean getContinueOnFailure() { + return continueOnFailure_; + } + /** + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when executing
+     * independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3; + */ + public Builder setContinueOnFailure(boolean value) { + + continueOnFailure_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when executing
+     * independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3; + */ + public Builder clearContinueOnFailure() { + + continueOnFailure_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> scriptVariables_; + private com.google.protobuf.MapField + internalGetScriptVariables() { + if (scriptVariables_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + return scriptVariables_; + } + private com.google.protobuf.MapField + internalGetMutableScriptVariables() { + onChanged();; + if (scriptVariables_ == null) { + scriptVariables_ = com.google.protobuf.MapField.newMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + if (!scriptVariables_.isMutable()) { + scriptVariables_ = scriptVariables_.copy(); + } + return scriptVariables_; + } + + public int getScriptVariablesCount() { + return internalGetScriptVariables().getMap().size(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the Pig
+     * command: `name=[value]`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public boolean containsScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetScriptVariables().getMap().containsKey(key); + } + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getScriptVariables() { + return getScriptVariablesMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the Pig
+     * command: `name=[value]`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.util.Map getScriptVariablesMap() { + return internalGetScriptVariables().getMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the Pig
+     * command: `name=[value]`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the Pig
+     * command: `name=[value]`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public java.lang.String getScriptVariablesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearScriptVariables() { + internalGetMutableScriptVariables().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the Pig
+     * command: `name=[value]`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public Builder removeScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableScriptVariables().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableScriptVariables() { + return internalGetMutableScriptVariables().getMutableMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the Pig
+     * command: `name=[value]`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + public Builder putScriptVariables( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableScriptVariables().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the Pig
+     * command: `name=[value]`).
+     * 
+ * + * map<string, string> script_variables = 4; + */ + + public Builder putAllScriptVariables( + java.util.Map values) { + internalGetMutableScriptVariables().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged();; + if (properties_ == null) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Pig.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/pig/conf/pig.properties, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Pig.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/pig/conf/pig.properties, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Pig.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/pig/conf/pig.properties, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Pig.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/pig/conf/pig.properties, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Pig.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/pig/conf/pig.properties, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + + public Builder removeProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Pig.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/pig/conf/pig.properties, and classes in user code.
+     * 
+ * + * map<string, string> properties = 5; + */ + public Builder putProperties( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Pig.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+     * /etc/pig/conf/pig.properties, and classes in user code.
+     * 
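+     * <p>A minimal sketch of the map accessors above (the property name and
+     * values are placeholders, not defaults):
+     * <pre>{@code
+     * PigJob.Builder pig = PigJob.newBuilder();
+     * pig.putProperties("mapred.reduce.tasks", "4");
+     * String tasks = pig.getPropertiesOrDefault("mapred.reduce.tasks", "1");
+     * }</pre>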
+ * + * map<string, string> properties = 5; + */ + + public Builder putAllProperties( + java.util.Map values) { + internalGetMutableProperties().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000020; + } + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder setJarFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder addJarFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder addAllJarFileUris( + java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jarFileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
+ * + * repeated string jar_file_uris = 6; + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+     * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+     * 
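+     * <p>A short sketch of populating this list (the bucket paths are
+     * placeholders):
+     * <pre>{@code
+     * PigJob.Builder pig = PigJob.newBuilder();
+     * pig.addJarFileUris("gs://my-bucket/udfs/pig-udfs.jar");
+     * pig.addAllJarFileUris(java.util.Arrays.asList(
+     *     "gs://my-bucket/libs/a.jar",
+     *     "gs://my-bucket/libs/b.jar"));
+     * }</pre>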
+ * + * repeated string jar_file_uris = 6; + */ + public Builder addJarFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> loggingConfigBuilder_; + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public boolean hasLoggingConfig() { + return loggingConfigBuilder_ != null || loggingConfig_ != null; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + if (loggingConfigBuilder_ == null) { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } else { + return loggingConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public Builder setLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + loggingConfig_ = value; + onChanged(); + } else { + loggingConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public Builder setLoggingConfig( + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder builderForValue) { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = builderForValue.build(); + onChanged(); + } else { + loggingConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public Builder mergeLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (loggingConfig_ != null) { + loggingConfig_ = + com.google.cloud.dataproc.v1beta2.LoggingConfig.newBuilder(loggingConfig_).mergeFrom(value).buildPartial(); + } else { + loggingConfig_ = value; + } + onChanged(); + } else { + loggingConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public Builder clearLoggingConfig() { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + onChanged(); + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
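+     * <p>The nested builder allows in-place edits; assuming the
+     * {@code driver_log_levels} map declared in jobs.proto, a sketch:
+     * <pre>{@code
+     * PigJob.Builder pig = PigJob.newBuilder();
+     * pig.getLoggingConfigBuilder()
+     *     .putDriverLogLevels("root", LoggingConfig.Level.DEBUG);
+     * }</pre>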
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder getLoggingConfigBuilder() { + + onChanged(); + return getLoggingConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + if (loggingConfigBuilder_ != null) { + return loggingConfigBuilder_.getMessageOrBuilder(); + } else { + return loggingConfig_ == null ? + com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> + getLoggingConfigFieldBuilder() { + if (loggingConfigBuilder_ == null) { + loggingConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder>( + getLoggingConfig(), + getParentForChildren(), + isClean()); + loggingConfig_ = null; + } + return loggingConfigBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.PigJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PigJob) + private static final com.google.cloud.dataproc.v1beta2.PigJob DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.PigJob(); + } + + public static com.google.cloud.dataproc.v1beta2.PigJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PigJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PigJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PigJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java new file mode 100644 index 000000000000..00e0e6fc4d2b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java @@ -0,0 +1,257 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface PigJobOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.PigJob) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The HCFS URI of the script that contains the Pig queries.
+   * 
+ * + * string query_file_uri = 1; + */ + java.lang.String getQueryFileUri(); + /** + *
+   * The HCFS URI of the script that contains the Pig queries.
+   * 
+ * + * string query_file_uri = 1; + */ + com.google.protobuf.ByteString + getQueryFileUriBytes(); + + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + boolean hasQueryList(); + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + com.google.cloud.dataproc.v1beta2.QueryList getQueryList(); + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder(); + + /** + *
+   * Optional. Whether to continue executing queries if a query fails.
+   * The default value is `false`. Setting to `true` can be useful when executing
+   * independent parallel queries.
+   * 
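+   * <p>Code written against this interface can branch on the flag without
+   * knowing whether it holds a message or a builder (sketch):
+   * <pre>{@code
+   * void describe(PigJobOrBuilder job) {
+   *   if (job.getContinueOnFailure()) {
+   *     // remaining queries run even if one fails
+   *   }
+   * }
+   * }</pre>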
+ * + * bool continue_on_failure = 3; + */ + boolean getContinueOnFailure(); + + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + int getScriptVariablesCount(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + boolean containsScriptVariables( + java.lang.String key); + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getScriptVariables(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + java.util.Map + getScriptVariablesMap(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the Pig
+   * command: `name=[value]`).
+   * 
+ * + * map<string, string> script_variables = 4; + */ + + java.lang.String getScriptVariablesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + int getPropertiesCount(); + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + boolean containsProperties( + java.lang.String key); + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getProperties(); + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + java.util.Map + getPropertiesMap(); + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + + java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. A mapping of property names to values, used to configure Pig.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+   * /etc/pig/conf/pig.properties, and classes in user code.
+   * 
+ * + * map<string, string> properties = 5; + */ + + java.lang.String getPropertiesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + java.util.List + getJarFileUrisList(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + int getJarFileUrisCount(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + java.lang.String getJarFileUris(int index); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATH of
+   * the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+   * 
+ * + * repeated string jar_file_uris = 6; + */ + com.google.protobuf.ByteString + getJarFileUrisBytes(int index); + + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + boolean hasLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 7; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder(); + + public com.google.cloud.dataproc.v1beta2.PigJob.QueriesCase getQueriesCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java new file mode 100644 index 000000000000..896557f651ce --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java @@ -0,0 +1,2360 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A Cloud Dataproc job for running
+ * [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+ * applications on YARN.
+ * 
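+ * <p>A minimal construction sketch (the bucket paths and property value are
+ * placeholders, not defaults):
+ * <pre>{@code
+ * PySparkJob job = PySparkJob.newBuilder()
+ *     .setMainPythonFileUri("gs://my-bucket/jobs/wordcount.py")
+ *     .addArgs("gs://my-bucket/input/")
+ *     .addPythonFileUris("gs://my-bucket/libs/helpers.py")
+ *     .putProperties("spark.executor.memory", "4g")
+ *     .build();
+ * }</pre>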
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.PySparkJob} + */ +public final class PySparkJob extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.PySparkJob) + PySparkJobOrBuilder { +private static final long serialVersionUID = 0L; + // Use PySparkJob.newBuilder() to construct. + private PySparkJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PySparkJob() { + mainPythonFileUri_ = ""; + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + pythonFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PySparkJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + mainPythonFileUri_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + args_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + args_.add(s); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + pythonFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + pythonFileUris_.add(s); + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000008; + } + jarFileUris_.add(s); + break; + } + case 42: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000010; + } + fileUris_.add(s); + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000020; + } + archiveUris_.add(s); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000040; + } + com.google.protobuf.MapEntry + properties__ = input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + properties_.getMutableMap().put( + properties__.getKey(), properties__.getValue()); + break; + } + case 66: { + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder subBuilder = null; + if (loggingConfig_ != 
null) { + subBuilder = loggingConfig_.toBuilder(); + } + loggingConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.LoggingConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loggingConfig_); + loggingConfig_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + args_ = args_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + pythonFileUris_ = pythonFileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = fileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PySparkJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 7: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PySparkJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.PySparkJob.class, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder.class); + } + + private int bitField0_; + public static final int MAIN_PYTHON_FILE_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object mainPythonFileUri_; + /** + *
+   * Required. The HCFS URI of the main Python file to use as the driver. Must
+   * be a .py file.
+   * 
+ * + * string main_python_file_uri = 1; + */ + public java.lang.String getMainPythonFileUri() { + java.lang.Object ref = mainPythonFileUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainPythonFileUri_ = s; + return s; + } + } + /** + *
+   * Required. The HCFS URI of the main Python file to use as the driver. Must
+   * be a .py file.
+   * 
+ * + * string main_python_file_uri = 1; + */ + public com.google.protobuf.ByteString + getMainPythonFileUriBytes() { + java.lang.Object ref = mainPythonFileUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + mainPythonFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARGS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList args_; + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
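+   * <p>For example, set Spark properties through the properties map rather
+   * than a {@code --conf} argument (sketch; the property is illustrative):
+   * <pre>{@code
+   * builder.putProperties("spark.dynamicAllocation.enabled", "false");
+   * // not: builder.addArgs("--conf", "spark.dynamicAllocation.enabled=false");
+   * }</pre>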
+ * + * repeated string args = 2; + */ + public com.google.protobuf.ProtocolStringList + getArgsList() { + return args_; + } + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2; + */ + public int getArgsCount() { + return args_.size(); + } + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2; + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2; + */ + public com.google.protobuf.ByteString + getArgsBytes(int index) { + return args_.getByteString(index); + } + + public static final int PYTHON_FILE_URIS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList pythonFileUris_; + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + public com.google.protobuf.ProtocolStringList + getPythonFileUrisList() { + return pythonFileUris_; + } + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + public int getPythonFileUrisCount() { + return pythonFileUris_.size(); + } + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + public java.lang.String getPythonFileUris(int index) { + return pythonFileUris_.get(index); + } + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + public com.google.protobuf.ByteString + getPythonFileUrisBytes(int index) { + return pythonFileUris_.getByteString(index); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_; + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + public static final int FILE_URIS_FIELD_NUMBER = 5; + private com.google.protobuf.LazyStringList fileUris_; + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ProtocolStringList + getFileUrisList() { + return fileUris_; + } + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ByteString + getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + + public static final int ARCHIVE_URIS_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList archiveUris_; + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getArchiveUrisList() { + return archiveUris_; + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ByteString + getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + + public static final int PROPERTIES_FIELD_NUMBER = 7; + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PySparkJob_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
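+   * <p>Prefer the defaulting accessor when absence is expected (sketch):
+   * <pre>{@code
+   * String mem = job.getPropertiesOrDefault("spark.executor.memory", "1g");
+   * }</pre>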
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int LOGGING_CONFIG_FIELD_NUMBER = 8; + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_; + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public boolean hasLoggingConfig() { + return loggingConfig_ != null; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + return getLoggingConfig(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getMainPythonFileUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, mainPythonFileUri_); + } + for (int i = 0; i < args_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, args_.getRaw(i)); + } + for (int i = 0; i < pythonFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, pythonFileUris_.getRaw(i)); + } + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, jarFileUris_.getRaw(i)); + } + for (int i = 0; i < fileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, fileUris_.getRaw(i)); + } + for (int i = 0; i < archiveUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, archiveUris_.getRaw(i)); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetProperties(), + PropertiesDefaultEntryHolder.defaultEntry, + 7); + if (loggingConfig_ != null) { + output.writeMessage(8, getLoggingConfig()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getMainPythonFileUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, mainPythonFileUri_); + } + { + int dataSize = 0; + for (int i = 0; i < args_.size(); i++) { + dataSize += computeStringSizeNoTag(args_.getRaw(i)); + } + size += dataSize; + size += 1 * getArgsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < pythonFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(pythonFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getPythonFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getJarFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < fileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(fileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < archiveUris_.size(); i++) { + dataSize += computeStringSizeNoTag(archiveUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getArchiveUrisList().size(); + } + for (java.util.Map.Entry entry + : internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry + properties__ = PropertiesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, properties__); + } + if (loggingConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, getLoggingConfig()); + } + size += unknownFields.getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.PySparkJob)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.PySparkJob other = (com.google.cloud.dataproc.v1beta2.PySparkJob) obj; + + boolean result = true; + result = result && getMainPythonFileUri() + .equals(other.getMainPythonFileUri()); + result = result && getArgsList() + .equals(other.getArgsList()); + result = result && getPythonFileUrisList() + .equals(other.getPythonFileUrisList()); + result = result && getJarFileUrisList() + .equals(other.getJarFileUrisList()); + result = result && getFileUrisList() + .equals(other.getFileUrisList()); + result = result && getArchiveUrisList() + .equals(other.getArchiveUrisList()); + result = result && internalGetProperties().equals( + other.internalGetProperties()); + result = result && (hasLoggingConfig() == other.hasLoggingConfig()); + if (hasLoggingConfig()) { + result = result && getLoggingConfig() + .equals(other.getLoggingConfig()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MAIN_PYTHON_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMainPythonFileUri().hashCode(); + if (getArgsCount() > 0) { + hash = (37 * hash) + ARGS_FIELD_NUMBER; + hash = (53 * hash) + getArgsList().hashCode(); + } + if (getPythonFileUrisCount() > 0) { + hash = (37 * hash) + PYTHON_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getPythonFileUrisList().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + if (getFileUrisCount() > 0) { + hash = (37 * hash) + FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getFileUrisList().hashCode(); + } + if (getArchiveUrisCount() > 0) { + hash = (37 * hash) + ARCHIVE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getArchiveUrisList().hashCode(); + } + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + if (hasLoggingConfig()) { + hash = (37 * hash) + LOGGING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLoggingConfig().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.PySparkJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.PySparkJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc job for running
+   * [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+   * applications on YARN.
+   * 
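+   * <p>Builders support copy-and-modify and wire round-trips; a sketch, where
+   * {@code original} is an existing PySparkJob:
+   * <pre>{@code
+   * PySparkJob updated = original.toBuilder()
+   *     .addArgs("--verbose")
+   *     .build();
+   * byte[] bytes = updated.toByteArray();
+   * PySparkJob parsed = PySparkJob.parseFrom(bytes);
+   * }</pre>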
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.PySparkJob} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.PySparkJob) + com.google.cloud.dataproc.v1beta2.PySparkJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PySparkJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 7: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 7: + return internalGetMutableProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PySparkJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.PySparkJob.class, com.google.cloud.dataproc.v1beta2.PySparkJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.PySparkJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + mainPythonFileUri_ = ""; + + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + pythonFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + internalGetMutableProperties().clear(); + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_PySparkJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PySparkJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PySparkJob build() { + com.google.cloud.dataproc.v1beta2.PySparkJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PySparkJob buildPartial() { + 
com.google.cloud.dataproc.v1beta2.PySparkJob result = new com.google.cloud.dataproc.v1beta2.PySparkJob(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.mainPythonFileUri_ = mainPythonFileUri_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + args_ = args_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.args_ = args_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + pythonFileUris_ = pythonFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.pythonFileUris_ = pythonFileUris_; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.jarFileUris_ = jarFileUris_; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = fileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.fileUris_ = fileUris_; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.archiveUris_ = archiveUris_; + result.properties_ = internalGetProperties(); + result.properties_.makeImmutable(); + if (loggingConfigBuilder_ == null) { + result.loggingConfig_ = loggingConfig_; + } else { + result.loggingConfig_ = loggingConfigBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.PySparkJob) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.PySparkJob)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.PySparkJob other) { + if (other == com.google.cloud.dataproc.v1beta2.PySparkJob.getDefaultInstance()) return this; + if (!other.getMainPythonFileUri().isEmpty()) { + mainPythonFileUri_ = other.mainPythonFileUri_; + onChanged(); + } + if (!other.args_.isEmpty()) { + if (args_.isEmpty()) { + args_ = other.args_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureArgsIsMutable(); + args_.addAll(other.args_); + } + onChanged(); + } + if (!other.pythonFileUris_.isEmpty()) { + if (pythonFileUris_.isEmpty()) { + pythonFileUris_ = other.pythonFileUris_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensurePythonFileUrisIsMutable(); + pythonFileUris_.addAll(other.pythonFileUris_); + } + onChanged(); + } + if 
(!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + if (!other.fileUris_.isEmpty()) { + if (fileUris_.isEmpty()) { + fileUris_ = other.fileUris_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureFileUrisIsMutable(); + fileUris_.addAll(other.fileUris_); + } + onChanged(); + } + if (!other.archiveUris_.isEmpty()) { + if (archiveUris_.isEmpty()) { + archiveUris_ = other.archiveUris_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureArchiveUrisIsMutable(); + archiveUris_.addAll(other.archiveUris_); + } + onChanged(); + } + internalGetMutableProperties().mergeFrom( + other.internalGetProperties()); + if (other.hasLoggingConfig()) { + mergeLoggingConfig(other.getLoggingConfig()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.PySparkJob parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.PySparkJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object mainPythonFileUri_ = ""; + /** + *
+     * Required. The HCFS URI of the main Python file to use as the driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1; + */ + public java.lang.String getMainPythonFileUri() { + java.lang.Object ref = mainPythonFileUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainPythonFileUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The HCFS URI of the main Python file to use as the driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1; + */ + public com.google.protobuf.ByteString + getMainPythonFileUriBytes() { + java.lang.Object ref = mainPythonFileUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + mainPythonFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The HCFS URI of the main Python file to use as the driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1; + */ + public Builder setMainPythonFileUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + mainPythonFileUri_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The HCFS URI of the main Python file to use as the driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1; + */ + public Builder clearMainPythonFileUri() { + + mainPythonFileUri_ = getDefaultInstance().getMainPythonFileUri(); + onChanged(); + return this; + } + /** + *
+     * Required. The HCFS URI of the main Python file to use as the driver. Must
+     * be a .py file.
+     * 
+ * + * string main_python_file_uri = 1; + */ + public Builder setMainPythonFileUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + mainPythonFileUri_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureArgsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + args_ = new com.google.protobuf.LazyStringArrayList(args_); + bitField0_ |= 0x00000002; + } + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public com.google.protobuf.ProtocolStringList + getArgsList() { + return args_.getUnmodifiableView(); + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public int getArgsCount() { + return args_.size(); + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public com.google.protobuf.ByteString + getArgsBytes(int index) { + return args_.getByteString(index); + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public Builder setArgs( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public Builder addArgs( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public Builder addAllArgs( + java.lang.Iterable values) { + ensureArgsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, args_); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public Builder clearArgs() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2; + */ + public Builder addArgsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList pythonFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensurePythonFileUrisIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + pythonFileUris_ = new com.google.protobuf.LazyStringArrayList(pythonFileUris_); + bitField0_ |= 0x00000004; + } + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public com.google.protobuf.ProtocolStringList + getPythonFileUrisList() { + return pythonFileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public int getPythonFileUrisCount() { + return pythonFileUris_.size(); + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public java.lang.String getPythonFileUris(int index) { + return pythonFileUris_.get(index); + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public com.google.protobuf.ByteString + getPythonFileUrisBytes(int index) { + return pythonFileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public Builder setPythonFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePythonFileUrisIsMutable(); + pythonFileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public Builder addPythonFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePythonFileUrisIsMutable(); + pythonFileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public Builder addAllPythonFileUris( + java.lang.Iterable values) { + ensurePythonFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, pythonFileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public Builder clearPythonFileUris() { + pythonFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS file URIs of Python files to pass to the PySpark
+     * framework. Supported file types: .py, .egg, and .zip.
+     * 
+ * + * repeated string python_file_uris = 3; + */ + public Builder addPythonFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensurePythonFileUrisIsMutable(); + pythonFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000008; + } + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder setJarFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addJarFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addAllJarFileUris( + java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jarFileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Python driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addJarFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureFileUrisIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(fileUris_); + bitField0_ |= 0x00000010; + } + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ProtocolStringList + getFileUrisList() { + return fileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ByteString + getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder setFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addAllFileUris( + java.lang.Iterable values) { + ensureFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, fileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder clearFileUris() { + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Python drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureArchiveUrisIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(archiveUris_); + bitField0_ |= 0x00000020; + } + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getArchiveUrisList() { + return archiveUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ByteString + getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder setArchiveUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder addArchiveUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder addAllArchiveUris( + java.lang.Iterable values) { + ensureArchiveUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, archiveUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder clearArchiveUris() { + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted into the working directory
+     * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder addArchiveUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged();; + if (properties_ == null) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure PySpark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure PySpark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure PySpark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure PySpark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure PySpark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public Builder removeProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure PySpark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + public Builder putProperties( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure PySpark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public Builder putAllProperties( + java.util.Map values) { + internalGetMutableProperties().getMutableMap() + .putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> loggingConfigBuilder_; + /** + *
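A small self-contained sketch of how the generated map accessors above behave once a message is built; the property key and values here are hypothetical placeholders, not part of the patch:

import com.google.cloud.dataproc.v1beta2.PySparkJob;

public class PropertiesAccessorsDemo {
  public static void main(String[] args) {
    PySparkJob job = PySparkJob.newBuilder()
        .setMainPythonFileUri("gs://example-bucket/driver.py") // hypothetical URI
        .putProperties("spark.executor.memory", "4g")
        .build();
    // The built message mirrors the builder's map accessors: containsProperties,
    // getPropertiesMap, getPropertiesOrDefault, getPropertiesOrThrow.
    System.out.println(job.getPropertiesOrDefault("spark.executor.memory", "1g")); // prints 4g
    System.out.println(job.containsProperties("spark.eventLog.enabled"));          // prints false
    // job.getPropertiesOrThrow("missing.key") would throw IllegalArgumentException.
  }
}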
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public boolean hasLoggingConfig() { + return loggingConfigBuilder_ != null || loggingConfig_ != null; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + if (loggingConfigBuilder_ == null) { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } else { + return loggingConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder setLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + loggingConfig_ = value; + onChanged(); + } else { + loggingConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder setLoggingConfig( + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder builderForValue) { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = builderForValue.build(); + onChanged(); + } else { + loggingConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder mergeLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (loggingConfig_ != null) { + loggingConfig_ = + com.google.cloud.dataproc.v1beta2.LoggingConfig.newBuilder(loggingConfig_).mergeFrom(value).buildPartial(); + } else { + loggingConfig_ = value; + } + onChanged(); + } else { + loggingConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder clearLoggingConfig() { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + onChanged(); + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder getLoggingConfigBuilder() { + + onChanged(); + return getLoggingConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + if (loggingConfigBuilder_ != null) { + return loggingConfigBuilder_.getMessageOrBuilder(); + } else { + return loggingConfig_ == null ? + com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> + getLoggingConfigFieldBuilder() { + if (loggingConfigBuilder_ == null) { + loggingConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder>( + getLoggingConfig(), + getParentForChildren(), + isClean()); + loggingConfig_ = null; + } + return loggingConfigBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.PySparkJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PySparkJob) + private static final com.google.cloud.dataproc.v1beta2.PySparkJob DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.PySparkJob(); + } + + public static com.google.cloud.dataproc.v1beta2.PySparkJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PySparkJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PySparkJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.PySparkJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJobOrBuilder.java new file mode 100644 index 000000000000..b2c0f961ab78 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJobOrBuilder.java @@ -0,0 +1,322 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface PySparkJobOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.PySparkJob) + com.google.protobuf.MessageOrBuilder { + + /** + *
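Pulling the fields documented above together, a minimal usage sketch of the generated PySparkJob builder; every gs:// URI below is a hypothetical placeholder for an HCFS-compatible URI:

import com.google.cloud.dataproc.v1beta2.LoggingConfig;
import com.google.cloud.dataproc.v1beta2.PySparkJob;

public class PySparkJobBuilderDemo {
  public static void main(String[] args) {
    PySparkJob job = PySparkJob.newBuilder()
        .setMainPythonFileUri("gs://example-bucket/driver.py") // required; must be a .py file
        .addArgs("--input=gs://example-bucket/data")           // avoid args like --conf that are job properties
        .addPythonFileUris("gs://example-bucket/deps.zip")     // .py, .egg, or .zip
        .addJarFileUris("gs://example-bucket/helper.jar")      // added to driver/task CLASSPATHs
        .addFileUris("gs://example-bucket/lookup.csv")         // copied to working directories
        .addArchiveUris("gs://example-bucket/env.tar.gz")      // extracted in working directories
        .setLoggingConfig(LoggingConfig.getDefaultInstance())  // optional runtime log config
        .build();
    System.out.println(job.getArgsCount()); // prints 1
  }
}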
+   * Required. The HCFS URI of the main Python file to use as the driver. Must
+   * be a .py file.
+   * 
+ * + * string main_python_file_uri = 1; + */ + java.lang.String getMainPythonFileUri(); + /** + *
+   * Required. The HCFS URI of the main Python file to use as the driver. Must
+   * be a .py file.
+   * 
+ * + * string main_python_file_uri = 1; + */ + com.google.protobuf.ByteString + getMainPythonFileUriBytes(); + + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2; + */ + java.util.List + getArgsList(); + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2; + */ + int getArgsCount(); + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2; + */ + java.lang.String getArgs(int index); + /** + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2; + */ + com.google.protobuf.ByteString + getArgsBytes(int index); + + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + java.util.List + getPythonFileUrisList(); + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + int getPythonFileUrisCount(); + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + java.lang.String getPythonFileUris(int index); + /** + *
+   * Optional. HCFS file URIs of Python files to pass to the PySpark
+   * framework. Supported file types: .py, .egg, and .zip.
+   * 
+ * + * repeated string python_file_uris = 3; + */ + com.google.protobuf.ByteString + getPythonFileUrisBytes(int index); + + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + java.util.List + getJarFileUrisList(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + int getJarFileUrisCount(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + java.lang.String getJarFileUris(int index); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Python driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + com.google.protobuf.ByteString + getJarFileUrisBytes(int index); + + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + java.util.List + getFileUrisList(); + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + int getFileUrisCount(); + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + java.lang.String getFileUris(int index); + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Python drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + com.google.protobuf.ByteString + getFileUrisBytes(int index); + + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + java.util.List + getArchiveUrisList(); + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + int getArchiveUrisCount(); + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + java.lang.String getArchiveUris(int index); + /** + *
+   * Optional. HCFS URIs of archives to be extracted into the working directory
+   * of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + com.google.protobuf.ByteString + getArchiveUrisBytes(int index); + + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + int getPropertiesCount(); + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + boolean containsProperties( + java.lang.String key); + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getProperties(); + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + java.util.Map + getPropertiesMap(); + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. A mapping of property names to values, used to configure PySpark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + java.lang.String getPropertiesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + boolean hasLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryList.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryList.java new file mode 100644 index 000000000000..096e86533e94 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryList.java @@ -0,0 +1,802 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
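For the QueryList message added below, a minimal builder sketch; the query strings are placeholders, and the third entry shows the documented packing of several semicolon-separated queries into one string:

import com.google.cloud.dataproc.v1beta2.QueryList;

public class QueryListDemo {
  public static void main(String[] args) {
    QueryList queries = QueryList.newBuilder()
        .addQueries("query1")
        .addQueries("query2")
        .addQueries("query3;query4") // two queries packed into one string
        .build();
    System.out.println(queries.getQueriesCount()); // prints 3 (the last entry holds two queries)
  }
}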
+ * A list of queries to run on a cluster.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.QueryList} + */ +public final class QueryList extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.QueryList) + QueryListOrBuilder { +private static final long serialVersionUID = 0L; + // Use QueryList.newBuilder() to construct. + private QueryList(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private QueryList() { + queries_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private QueryList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + queries_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + queries_.add(s); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + queries_ = queries_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_QueryList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_QueryList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.QueryList.class, com.google.cloud.dataproc.v1beta2.QueryList.Builder.class); + } + + public static final int QUERIES_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList queries_; + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
+   * by separating each with a semicolon. Here is an example of a Cloud
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + public com.google.protobuf.ProtocolStringList + getQueriesList() { + return queries_; + } + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
+   * by separating each with a semicolon. Here is an example of a Cloud
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + public int getQueriesCount() { + return queries_.size(); + } + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
+   * by separating each with a semicolon. Here is an example of a Cloud
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + public java.lang.String getQueries(int index) { + return queries_.get(index); + } + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
+   * by separating each with a semicolon. Here is an example of a Cloud
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + public com.google.protobuf.ByteString + getQueriesBytes(int index) { + return queries_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < queries_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, queries_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < queries_.size(); i++) { + dataSize += computeStringSizeNoTag(queries_.getRaw(i)); + } + size += dataSize; + size += 1 * getQueriesList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.QueryList)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.QueryList other = (com.google.cloud.dataproc.v1beta2.QueryList) obj; + + boolean result = true; + result = result && getQueriesList() + .equals(other.getQueriesList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getQueriesCount() > 0) { + hash = (37 * hash) + QUERIES_FIELD_NUMBER; + hash = (53 * hash) + getQueriesList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.QueryList 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.QueryList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.QueryList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
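A short round-trip sketch using the serialization surface exposed by the parseFrom overloads above; toByteArray() is inherited from the protobuf message base class, and the query string is a placeholder:

import com.google.cloud.dataproc.v1beta2.QueryList;
import com.google.protobuf.InvalidProtocolBufferException;

public class QueryListRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    QueryList original = QueryList.newBuilder().addQueries("query1").build();
    byte[] wire = original.toByteArray();         // serialize to the protobuf wire format
    QueryList parsed = QueryList.parseFrom(wire); // one of the parseFrom overloads above
    System.out.println(original.equals(parsed));  // prints true
  }
}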
+   * A list of queries to run on a cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.QueryList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.QueryList) + com.google.cloud.dataproc.v1beta2.QueryListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_QueryList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_QueryList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.QueryList.class, com.google.cloud.dataproc.v1beta2.QueryList.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.QueryList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + queries_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_QueryList_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.QueryList getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.QueryList build() { + com.google.cloud.dataproc.v1beta2.QueryList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.QueryList buildPartial() { + com.google.cloud.dataproc.v1beta2.QueryList result = new com.google.cloud.dataproc.v1beta2.QueryList(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + queries_ = queries_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.queries_ = queries_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.QueryList) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.QueryList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.QueryList other) { + if (other == com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance()) return this; + if (!other.queries_.isEmpty()) { + if (queries_.isEmpty()) { + queries_ = other.queries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureQueriesIsMutable(); + queries_.addAll(other.queries_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.QueryList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.QueryList) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private com.google.protobuf.LazyStringList queries_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureQueriesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + queries_ = new com.google.protobuf.LazyStringArrayList(queries_); + bitField0_ |= 0x00000001; + } + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
+     * by separating each with a semicolon. Here is an example of a Cloud
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
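 + *

A minimal usage sketch (illustrative only; it relies on nothing + * beyond the builder methods generated in this class):

+ *
+     * QueryList queryList = QueryList.newBuilder()
+     *     .addQueries("query1")
+     *     .addQueries("query2")
+     *     .addQueries("query3;query4")
+     *     .build();
+     * 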
+ * + * repeated string queries = 1; + */ + public com.google.protobuf.ProtocolStringList + getQueriesList() { + return queries_.getUnmodifiableView(); + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public int getQueriesCount() { + return queries_.size(); + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public java.lang.String getQueries(int index) { + return queries_.get(index); + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public com.google.protobuf.ByteString + getQueriesBytes(int index) { + return queries_.getByteString(index); + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public Builder setQueries( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueriesIsMutable(); + queries_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public Builder addQueries( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueriesIsMutable(); + queries_.add(value); + onChanged(); + return this; + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public Builder addAllQueries( + java.lang.Iterable values) { + ensureQueriesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, queries_); + onChanged(); + return this; + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public Builder clearQueries() { + queries_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + *
+     * Required. The queries to execute. You do not need to terminate a query
+     * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+     * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+     *     "hiveJob": {
+     *       "queryList": {
+     *         "queries": [
+     *           "query1",
+     *           "query2",
+     *           "query3;query4",
+     *         ]
+     *       }
+     *     }
+     * 
+ * + * repeated string queries = 1; + */ + public Builder addQueriesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureQueriesIsMutable(); + queries_.add(value); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.QueryList) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.QueryList) + private static final com.google.cloud.dataproc.v1beta2.QueryList DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.QueryList(); + } + + public static com.google.cloud.dataproc.v1beta2.QueryList getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new QueryList(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.QueryList getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryListOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryListOrBuilder.java new file mode 100644 index 000000000000..6df77c395093 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryListOrBuilder.java @@ -0,0 +1,92 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface QueryListOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.QueryList) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + java.util.List + getQueriesList(); + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + int getQueriesCount(); + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + java.lang.String getQueries(int index); + /** + *
+   * Required. The queries to execute. You do not need to terminate a query
+   * with a semicolon. Multiple queries can be specified in one string
 + * by separating each with a semicolon. Here is an example of a Cloud 
+   * Dataproc API snippet that uses a QueryList to specify a HiveJob:
+   *     "hiveJob": {
+   *       "queryList": {
+   *         "queries": [
+   *           "query1",
+   *           "query2",
+   *           "query3;query4",
+   *         ]
+   *       }
+   *     }
+   * 
+ * + * repeated string queries = 1; + */ + com.google.protobuf.ByteString + getQueriesBytes(int index); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/RegionName.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/RegionName.java new file mode 100644 index 000000000000..dcff46b581d9 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/RegionName.java @@ -0,0 +1,189 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ + +package com.google.cloud.dataproc.v1beta2; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import java.util.Map; +import java.util.ArrayList; +import java.util.List; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +@javax.annotation.Generated("by GAPIC protoc plugin") +public class RegionName implements ResourceName { + + private static final PathTemplate PATH_TEMPLATE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/regions/{region}"); + + private volatile Map fieldValuesMap; + + private final String project; + private final String region; + + public String getProject() { + return project; + } + + public String getRegion() { + return region; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + private RegionName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + region = Preconditions.checkNotNull(builder.getRegion()); + } + + public static RegionName of(String project, String region) { + return newBuilder() + .setProject(project) + .setRegion(region) + .build(); + } + + public static String format(String project, String region) { + return newBuilder() + .setProject(project) + .setRegion(region) + .build() + .toString(); + } + + public static RegionName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PATH_TEMPLATE.validatedMatch(formattedString, "RegionName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("region")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList(values.size()); + for (RegionName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PATH_TEMPLATE.matches(formattedString); + } + + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + 
synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + fieldMapBuilder.put("project", project); + fieldMapBuilder.put("region", region); + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PATH_TEMPLATE.instantiate("project", project, "region", region); + } + + /** Builder for RegionName. */ + public static class Builder { + + private String project; + private String region; + + public String getProject() { + return project; + } + + public String getRegion() { + return region; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setRegion(String region) { + this.region = region; + return this; + } + + private Builder() { + } + + private Builder(RegionName regionName) { + project = regionName.project; + region = regionName.region; + } + + public RegionName build() { + return new RegionName(this); + } + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o instanceof RegionName) { + RegionName that = (RegionName) o; + return (this.project.equals(that.project)) + && (this.region.equals(that.region)); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= project.hashCode(); + h *= 1000003; + h ^= region.hashCode(); + return h; + } +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SharedProto.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SharedProto.java new file mode 100644 index 000000000000..aebd40c63ff5 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SharedProto.java @@ -0,0 +1,50 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/shared.proto + +package com.google.cloud.dataproc.v1beta2; + +public final class SharedProto { + private SharedProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n*google/cloud/dataproc/v1beta2/shared.p" + + "roto\022\035google.cloud.dataproc.v1beta2\032\034goo" + + "gle/api/annotations.protoBy\n!com.google." + + "cloud.dataproc.v1beta2B\013SharedProtoP\001ZEg" + + "oogle.golang.org/genproto/googleapis/clo" + + "ud/dataproc/v1beta2;dataprocb\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + }, assigner); + com.google.api.AnnotationsProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java new file mode 100644 index 000000000000..48aeeccc3458 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java @@ -0,0 +1,1081 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Specifies the selection and config of software inside the cluster.
+ * 
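 + *

A hedged usage sketch (illustrative; the property value below is an + * assumed example, not a required setting):

+ *
+ * SoftwareConfig softwareConfig = SoftwareConfig.newBuilder()
+ *     .setImageVersion("1.2")
+ *     .putProperties("core:fs.defaultFS", "hdfs://example-host:8020")
+ *     .build();
+ * 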
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SoftwareConfig} + */ +public final class SoftwareConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.SoftwareConfig) + SoftwareConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use SoftwareConfig.newBuilder() to construct. + private SoftwareConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SoftwareConfig() { + imageVersion_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SoftwareConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + imageVersion_ = s; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000002; + } + com.google.protobuf.MapEntry + properties__ = input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + properties_.getMutableMap().put( + properties__.getKey(), properties__.getValue()); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SoftwareConfig.class, com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder.class); + } + + private int bitField0_; + public static final int IMAGE_VERSION_FIELD_NUMBER = 1; + private volatile java.lang.Object imageVersion_; + /** + *
+   * Optional. The version of software inside the cluster. It must be one of the supported
+   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * If unspecified, it defaults to the latest version.
+   * 
+ * + * string image_version = 1; + */ + public java.lang.String getImageVersion() { + java.lang.Object ref = imageVersion_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + imageVersion_ = s; + return s; + } + } + /** + *
+   * Optional. The version of software inside the cluster. It must be one of the supported
+   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * If unspecified, it defaults to the latest version.
+   * 
+ * + * string image_version = 1; + */ + public com.google.protobuf.ByteString + getImageVersionBytes() { + java.lang.Object ref = imageVersion_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + imageVersion_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROPERTIES_FIELD_NUMBER = 2; + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getImageVersionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, imageVersion_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetProperties(), + PropertiesDefaultEntryHolder.defaultEntry, + 2); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getImageVersionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, imageVersion_); + } + for (java.util.Map.Entry entry + : internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry + properties__ = PropertiesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, properties__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.SoftwareConfig)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.SoftwareConfig other = (com.google.cloud.dataproc.v1beta2.SoftwareConfig) obj; + + boolean result = true; + result = result && getImageVersion() + .equals(other.getImageVersion()); + result = result && internalGetProperties().equals( + other.internalGetProperties()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + IMAGE_VERSION_FIELD_NUMBER; + hash = (53 * hash) + getImageVersion().hashCode(); + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + com.google.protobuf.ByteString data) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.SoftwareConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Specifies the selection and config of software inside the cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SoftwareConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.SoftwareConfig) + com.google.cloud.dataproc.v1beta2.SoftwareConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 2: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 2: + return internalGetMutableProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SoftwareConfig.class, com.google.cloud.dataproc.v1beta2.SoftwareConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.SoftwareConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + imageVersion_ = ""; + + internalGetMutableProperties().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_SoftwareConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SoftwareConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.SoftwareConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SoftwareConfig build() { + com.google.cloud.dataproc.v1beta2.SoftwareConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SoftwareConfig buildPartial() { + com.google.cloud.dataproc.v1beta2.SoftwareConfig result = new com.google.cloud.dataproc.v1beta2.SoftwareConfig(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.imageVersion_ = imageVersion_; + result.properties_ = internalGetProperties(); + result.properties_.makeImmutable(); + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) 
super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.SoftwareConfig) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.SoftwareConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.SoftwareConfig other) { + if (other == com.google.cloud.dataproc.v1beta2.SoftwareConfig.getDefaultInstance()) return this; + if (!other.getImageVersion().isEmpty()) { + imageVersion_ = other.imageVersion_; + onChanged(); + } + internalGetMutableProperties().mergeFrom( + other.internalGetProperties()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.SoftwareConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.SoftwareConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object imageVersion_ = ""; + /** + *
+     * Optional. The version of software inside the cluster. It must be one of the supported
+     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * If unspecified, it defaults to the latest version.
+     * 
+ * + * string image_version = 1; + */ + public java.lang.String getImageVersion() { + java.lang.Object ref = imageVersion_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + imageVersion_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The version of software inside the cluster. It must be one of the supported
+     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * If unspecified, it defaults to the latest version.
+     * 
+ * + * string image_version = 1; + */ + public com.google.protobuf.ByteString + getImageVersionBytes() { + java.lang.Object ref = imageVersion_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + imageVersion_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The version of software inside the cluster. It must be one of the supported
+     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * If unspecified, it defaults to the latest version.
+     * 
+ * + * string image_version = 1; + */ + public Builder setImageVersion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + imageVersion_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The version of software inside the cluster. It must be one of the supported
+     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * If unspecified, it defaults to the latest version.
+     * 
+ * + * string image_version = 1; + */ + public Builder clearImageVersion() { + + imageVersion_ = getDefaultInstance().getImageVersion(); + onChanged(); + return this; + } + /** + *
+     * Optional. The version of software inside the cluster. It must be one of the supported
+     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * If unspecified, it defaults to the latest version.
+     * 
+ * + * string image_version = 1; + */ + public Builder setImageVersionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + imageVersion_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged();; + if (properties_ == null) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+     * Optional. The properties to set on daemon config files.
+     * Property keys are specified in `prefix:property` format, such as
+     * `core:fs.defaultFS`. The following are supported prefixes
+     * and their mappings:
+     * * capacity-scheduler: `capacity-scheduler.xml`
+     * * core:   `core-site.xml`
+     * * distcp: `distcp-default.xml`
+     * * hdfs:   `hdfs-site.xml`
+     * * hive:   `hive-site.xml`
+     * * mapred: `mapred-site.xml`
+     * * pig:    `pig.properties`
+     * * spark:  `spark-defaults.conf`
+     * * yarn:   `yarn-site.xml`
+     * For more information, see
+     * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+     * 
+ * + * map<string, string> properties = 2; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+     * Optional. The properties to set on daemon config files.
+     * Property keys are specified in `prefix:property` format, such as
+     * `core:fs.defaultFS`. The following are supported prefixes
+     * and their mappings:
+     * * capacity-scheduler: `capacity-scheduler.xml`
+     * * core:   `core-site.xml`
+     * * distcp: `distcp-default.xml`
+     * * hdfs:   `hdfs-site.xml`
+     * * hive:   `hive-site.xml`
+     * * mapred: `mapred-site.xml`
+     * * pig:    `pig.properties`
+     * * spark:  `spark-defaults.conf`
+     * * yarn:   `yarn-site.xml`
+     * For more information, see
+     * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+     * 
+ * + * map<string, string> properties = 2; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+     * Optional. The properties to set on daemon config files.
+     * Property keys are specified in `prefix:property` format, such as
+     * `core:fs.defaultFS`. The following are supported prefixes
+     * and their mappings:
+     * * capacity-scheduler: `capacity-scheduler.xml`
+     * * core:   `core-site.xml`
+     * * distcp: `distcp-default.xml`
+     * * hdfs:   `hdfs-site.xml`
+     * * hive:   `hive-site.xml`
+     * * mapred: `mapred-site.xml`
+     * * pig:    `pig.properties`
+     * * spark:  `spark-defaults.conf`
+     * * yarn:   `yarn-site.xml`
+     * For more information, see
+     * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+     * 
+ * + * map<string, string> properties = 2; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. The properties to set on daemon config files.
+     * Property keys are specified in `prefix:property` format, such as
+     * `core:fs.defaultFS`. The following are supported prefixes
+     * and their mappings:
+     * * capacity-scheduler: `capacity-scheduler.xml`
+     * * core:   `core-site.xml`
+     * * distcp: `distcp-default.xml`
+     * * hdfs:   `hdfs-site.xml`
+     * * hive:   `hive-site.xml`
+     * * mapred: `mapred-site.xml`
+     * * pig:    `pig.properties`
+     * * spark:  `spark-defaults.conf`
+     * * yarn:   `yarn-site.xml`
+     * For more information, see
+     * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+     * 
+ * + * map<string, string> properties = 2; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. The properties to set on daemon config files.
+     * Property keys are specified in `prefix:property` format, such as
+     * `core:fs.defaultFS`. The following are supported prefixes
+     * and their mappings:
+     * * capacity-scheduler: `capacity-scheduler.xml`
+     * * core:   `core-site.xml`
+     * * distcp: `distcp-default.xml`
+     * * hdfs:   `hdfs-site.xml`
+     * * hive:   `hive-site.xml`
+     * * mapred: `mapred-site.xml`
+     * * pig:    `pig.properties`
+     * * spark:  `spark-defaults.conf`
+     * * yarn:   `yarn-site.xml`
+     * For more information, see
+     * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+     * 
+ * + * map<string, string> properties = 2; + */ + + public Builder removeProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + *
+     * Optional. The properties to set on daemon config files.
+     * Property keys are specified in `prefix:property` format, such as
+     * `core:fs.defaultFS`. The following are supported prefixes
+     * and their mappings:
+     * * capacity-scheduler: `capacity-scheduler.xml`
+     * * core:   `core-site.xml`
+     * * distcp: `distcp-default.xml`
+     * * hdfs:   `hdfs-site.xml`
+     * * hive:   `hive-site.xml`
+     * * mapred: `mapred-site.xml`
+     * * pig:    `pig.properties`
+     * * spark:  `spark-defaults.conf`
+     * * yarn:   `yarn-site.xml`
+     * For more information, see
+     * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+     * 
+ * + * map<string, string> properties = 2; + */ + public Builder putProperties( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .put(key, value); + return this; + } + /** + *
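 + *

For example (the key and value here are illustrative assumptions + * using the documented spark: prefix):

+ *
+     * builder.putProperties("spark:spark.executor.memory", "4g");
+     * 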
+     * Optional. The properties to set on daemon config files.
+     * Property keys are specified in `prefix:property` format, such as
+     * `core:fs.defaultFS`. The following are supported prefixes
+     * and their mappings:
+     * * capacity-scheduler: `capacity-scheduler.xml`
+     * * core:   `core-site.xml`
+     * * distcp: `distcp-default.xml`
+     * * hdfs:   `hdfs-site.xml`
+     * * hive:   `hive-site.xml`
+     * * mapred: `mapred-site.xml`
+     * * pig:    `pig.properties`
+     * * spark:  `spark-defaults.conf`
+     * * yarn:   `yarn-site.xml`
+     * For more information, see
+     * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+     * 
+ * + * map<string, string> properties = 2; + */ + + public Builder putAllProperties( + java.util.Map values) { + internalGetMutableProperties().getMutableMap() + .putAll(values); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.SoftwareConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SoftwareConfig) + private static final com.google.cloud.dataproc.v1beta2.SoftwareConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.SoftwareConfig(); + } + + public static com.google.cloud.dataproc.v1beta2.SoftwareConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SoftwareConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SoftwareConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SoftwareConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java new file mode 100644 index 000000000000..b2ee51c32f38 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java @@ -0,0 +1,159 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface SoftwareConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.SoftwareConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. The version of software inside the cluster. It must be one of the supported
+   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * If unspecified, it defaults to the latest version.
+   * 
+ * + * string image_version = 1; + */ + java.lang.String getImageVersion(); + /** + *
+   * Optional. The version of software inside the cluster. It must be one of the supported
+   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * such as "1.2" (including a subminor version, such as "1.2.29"), or the
+   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * If unspecified, it defaults to the latest version.
+   * 
+ * + * string image_version = 1; + */ + com.google.protobuf.ByteString + getImageVersionBytes(); + + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
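+   *
+   * Illustrative sketch (values are placeholders) using the prefixed key
+   * format described above:
+   *
+   *   SoftwareConfig config = SoftwareConfig.newBuilder()
+   *       .putProperties("core:fs.defaultFS", "hdfs://example-m:8020")
+   *       .putProperties("spark:spark.executor.memory", "4g")
+   *       .build();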
+ * + * map<string, string> properties = 2; + */ + int getPropertiesCount(); + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + boolean containsProperties( + java.lang.String key); + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getProperties(); + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + java.util.Map + getPropertiesMap(); + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + + java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. The properties to set on daemon config files.
+   * Property keys are specified in `prefix:property` format, such as
+   * `core:fs.defaultFS`. The following are supported prefixes
+   * and their mappings:
+   * * capacity-scheduler: `capacity-scheduler.xml`
+   * * core:   `core-site.xml`
+   * * distcp: `distcp-default.xml`
+   * * hdfs:   `hdfs-site.xml`
+   * * hive:   `hive-site.xml`
+   * * mapred: `mapred-site.xml`
+   * * pig:    `pig.properties`
+   * * spark:  `spark-defaults.conf`
+   * * yarn:   `yarn-site.xml`
+   * For more information, see
+   * [Cluster properties](/dataproc/docs/concepts/cluster-properties).
+   * 
+ * + * map<string, string> properties = 2; + */ + + java.lang.String getPropertiesOrThrow( + java.lang.String key); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJob.java new file mode 100644 index 000000000000..2e9628a21302 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJob.java @@ -0,0 +1,2413 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
+ * applications on YARN.
+ * 
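+ *
+ * Illustrative sketch (added for clarity; URIs and the class name are
+ * placeholders) of assembling a SparkJob with the builder declared below:
+ *
+ *   SparkJob sparkJob = SparkJob.newBuilder()
+ *       .setMainClass("com.example.SparkWordCount")
+ *       .addJarFileUris("gs://my-bucket/word-count.jar")
+ *       .addArgs("gs://my-bucket/input.txt")
+ *       .build();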
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SparkJob} + */ +public final class SparkJob extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.SparkJob) + SparkJobOrBuilder { +private static final long serialVersionUID = 0L; + // Use SparkJob.newBuilder() to construct. + private SparkJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SparkJob() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SparkJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + driverCase_ = 1; + driver_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + driverCase_ = 2; + driver_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + args_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + args_.add(s); + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000008; + } + jarFileUris_.add(s); + break; + } + case 42: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000010; + } + fileUris_.add(s); + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000020; + } + archiveUris_.add(s); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000040; + } + com.google.protobuf.MapEntry + properties__ = input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + properties_.getMutableMap().put( + properties__.getKey(), properties__.getValue()); + break; + } + case 66: { + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder subBuilder = null; + if (loggingConfig_ != null) { + subBuilder = loggingConfig_.toBuilder(); + } + loggingConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.LoggingConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loggingConfig_); + loggingConfig_ = 
subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + args_ = args_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = fileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 7: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SparkJob.class, com.google.cloud.dataproc.v1beta2.SparkJob.Builder.class); + } + + private int bitField0_; + private int driverCase_ = 0; + private java.lang.Object driver_; + public enum DriverCase + implements com.google.protobuf.Internal.EnumLite { + MAIN_JAR_FILE_URI(1), + MAIN_CLASS(2), + DRIVER_NOT_SET(0); + private final int value; + private DriverCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DriverCase valueOf(int value) { + return forNumber(value); + } + + public static DriverCase forNumber(int value) { + switch (value) { + case 1: return MAIN_JAR_FILE_URI; + case 2: return MAIN_CLASS; + case 0: return DRIVER_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public DriverCase + getDriverCase() { + return DriverCase.forNumber( + driverCase_); + } + + public static final int MAIN_JAR_FILE_URI_FIELD_NUMBER = 1; + /** + *
+   * The HCFS URI of the jar file that contains the main class.
+   * 
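+   *
+   * Illustrative sketch (the URI is a placeholder). main_jar_file_uri and
+   * main_class share the driver oneof, so setting one replaces the other:
+   *
+   *   SparkJob job = SparkJob.newBuilder()
+   *       .setMainJarFileUri("gs://my-bucket/my-job.jar")
+   *       .build();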
+ * + * string main_jar_file_uri = 1; + */ + public java.lang.String getMainJarFileUri() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 1) { + driver_ = s; + } + return s; + } + } + /** + *
+   * The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1; + */ + public com.google.protobuf.ByteString + getMainJarFileUriBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 1) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MAIN_CLASS_FIELD_NUMBER = 2; + /** + *
+   * The name of the driver's main class. The jar file that contains the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * 
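+   *
+   * Illustrative read-side sketch: getMainClass() only carries a value when
+   * the driver oneof holds main_class, which DriverCase reports:
+   *
+   *   if (job.getDriverCase() == SparkJob.DriverCase.MAIN_CLASS) {
+   *     java.lang.String mainClass = job.getMainClass();
+   *   }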
+ * + * string main_class = 2; + */ + public java.lang.String getMainClass() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 2) { + driver_ = s; + } + return s; + } + } + /** + *
+   * The name of the driver's main class. The jar file that contains the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2; + */ + public com.google.protobuf.ByteString + getMainClassBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 2) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARGS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList args_; + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
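+   *
+   * Illustrative sketch (argument values are placeholders); per the caveat
+   * above, Spark settings belong in the properties map rather than in args:
+   *
+   *   SparkJob.Builder builder = SparkJob.newBuilder()
+   *       .addArgs("gs://my-bucket/input.txt")
+   *       .addArgs("--verbose");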
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ProtocolStringList + getArgsList() { + return args_; + } + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 3; + */ + public int getArgsCount() { + return args_.size(); + } + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 3; + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ByteString + getArgsBytes(int index) { + return args_.getByteString(index); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
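+   *
+   * Illustrative sketch (URIs are placeholders), adding jars one at a time or
+   * in bulk:
+   *
+   *   SparkJob.Builder builder = SparkJob.newBuilder()
+   *       .addJarFileUris("gs://my-bucket/libs/helper.jar")
+   *       .addAllJarFileUris(java.util.Arrays.asList(
+   *           "gs://my-bucket/libs/a.jar", "gs://my-bucket/libs/b.jar"));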
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_; + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + public static final int FILE_URIS_FIELD_NUMBER = 5; + private com.google.protobuf.LazyStringList fileUris_; + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
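+   *
+   * Illustrative sketch (the URI is a placeholder); the file is copied into
+   * the working directory of the driver and of each distributed task:
+   *
+   *   SparkJob.Builder builder = SparkJob.newBuilder()
+   *       .addFileUris("gs://my-bucket/lookup-table.csv");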
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ProtocolStringList + getFileUrisList() { + return fileUris_; + } + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ByteString + getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + + public static final int ARCHIVE_URIS_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList archiveUris_; + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
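+   *
+   * Illustrative sketch (the URI is a placeholder); the archive is extracted
+   * in the working directory, so its contents can be referenced by name:
+   *
+   *   SparkJob.Builder builder = SparkJob.newBuilder()
+   *       .addArchiveUris("gs://my-bucket/resources.zip");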
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getArchiveUrisList() { + return archiveUris_; + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ByteString + getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + + public static final int PROPERTIES_FIELD_NUMBER = 7; + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkJob_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
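+   *
+   * Illustrative sketch (values are placeholders); note that these keys are
+   * plain Spark property names, not the prefixed SoftwareConfig form:
+   *
+   *   SparkJob.Builder builder = SparkJob.newBuilder()
+   *       .putProperties("spark.executor.memory", "4g")
+   *       .putProperties("spark.dynamicAllocation.enabled", "true");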
+ * + * map<string, string> properties = 7; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int LOGGING_CONFIG_FIELD_NUMBER = 8; + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_; + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
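+   *
+   * Illustrative sketch, assuming the driver_log_levels map and Level enum
+   * declared on LoggingConfig elsewhere in this patch (logger name and class
+   * are placeholders):
+   *
+   *   SparkJob job = SparkJob.newBuilder()
+   *       .setMainClass("com.example.Main")
+   *       .setLoggingConfig(LoggingConfig.newBuilder()
+   *           .putDriverLogLevels("root", LoggingConfig.Level.DEBUG)
+   *           .build())
+   *       .build();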
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public boolean hasLoggingConfig() { + return loggingConfig_ != null; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + return getLoggingConfig(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (driverCase_ == 1) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, driver_); + } + if (driverCase_ == 2) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, driver_); + } + for (int i = 0; i < args_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, args_.getRaw(i)); + } + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, jarFileUris_.getRaw(i)); + } + for (int i = 0; i < fileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, fileUris_.getRaw(i)); + } + for (int i = 0; i < archiveUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, archiveUris_.getRaw(i)); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetProperties(), + PropertiesDefaultEntryHolder.defaultEntry, + 7); + if (loggingConfig_ != null) { + output.writeMessage(8, getLoggingConfig()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (driverCase_ == 1) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, driver_); + } + if (driverCase_ == 2) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, driver_); + } + { + int dataSize = 0; + for (int i = 0; i < args_.size(); i++) { + dataSize += computeStringSizeNoTag(args_.getRaw(i)); + } + size += dataSize; + size += 1 * getArgsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getJarFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < fileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(fileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < archiveUris_.size(); i++) { + dataSize += computeStringSizeNoTag(archiveUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getArchiveUrisList().size(); + } + for (java.util.Map.Entry entry + : internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry + properties__ = PropertiesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, properties__); + } + if (loggingConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, getLoggingConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.SparkJob)) { 
+ return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.SparkJob other = (com.google.cloud.dataproc.v1beta2.SparkJob) obj; + + boolean result = true; + result = result && getArgsList() + .equals(other.getArgsList()); + result = result && getJarFileUrisList() + .equals(other.getJarFileUrisList()); + result = result && getFileUrisList() + .equals(other.getFileUrisList()); + result = result && getArchiveUrisList() + .equals(other.getArchiveUrisList()); + result = result && internalGetProperties().equals( + other.internalGetProperties()); + result = result && (hasLoggingConfig() == other.hasLoggingConfig()); + if (hasLoggingConfig()) { + result = result && getLoggingConfig() + .equals(other.getLoggingConfig()); + } + result = result && getDriverCase().equals( + other.getDriverCase()); + if (!result) return false; + switch (driverCase_) { + case 1: + result = result && getMainJarFileUri() + .equals(other.getMainJarFileUri()); + break; + case 2: + result = result && getMainClass() + .equals(other.getMainClass()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getArgsCount() > 0) { + hash = (37 * hash) + ARGS_FIELD_NUMBER; + hash = (53 * hash) + getArgsList().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + if (getFileUrisCount() > 0) { + hash = (37 * hash) + FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getFileUrisList().hashCode(); + } + if (getArchiveUrisCount() > 0) { + hash = (37 * hash) + ARCHIVE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getArchiveUrisList().hashCode(); + } + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + if (hasLoggingConfig()) { + hash = (37 * hash) + LOGGING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLoggingConfig().hashCode(); + } + switch (driverCase_) { + case 1: + hash = (37 * hash) + MAIN_JAR_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMainJarFileUri().hashCode(); + break; + case 2: + hash = (37 * hash) + MAIN_CLASS_FIELD_NUMBER; + hash = (53 * hash) + getMainClass().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SparkJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.SparkJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
+   * applications on YARN.
+   * 
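+   *
+   * Illustrative round-trip sketch (the URI is a placeholder) using the parse
+   * methods declared above:
+   *
+   *   SparkJob original = SparkJob.newBuilder()
+   *       .setMainJarFileUri("gs://my-bucket/my-job.jar")
+   *       .build();
+   *   SparkJob copy = SparkJob.parseFrom(original.toByteArray());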
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SparkJob} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.SparkJob) + com.google.cloud.dataproc.v1beta2.SparkJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 7: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 7: + return internalGetMutableProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SparkJob.class, com.google.cloud.dataproc.v1beta2.SparkJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.SparkJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + internalGetMutableProperties().clear(); + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + driverCase_ = 0; + driver_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkJob build() { + com.google.cloud.dataproc.v1beta2.SparkJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkJob buildPartial() { + com.google.cloud.dataproc.v1beta2.SparkJob result = new com.google.cloud.dataproc.v1beta2.SparkJob(this); + int from_bitField0_ = bitField0_; + int 
to_bitField0_ = 0; + if (driverCase_ == 1) { + result.driver_ = driver_; + } + if (driverCase_ == 2) { + result.driver_ = driver_; + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + args_ = args_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.args_ = args_; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.jarFileUris_ = jarFileUris_; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = fileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.fileUris_ = fileUris_; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.archiveUris_ = archiveUris_; + result.properties_ = internalGetProperties(); + result.properties_.makeImmutable(); + if (loggingConfigBuilder_ == null) { + result.loggingConfig_ = loggingConfig_; + } else { + result.loggingConfig_ = loggingConfigBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + result.driverCase_ = driverCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.SparkJob) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.SparkJob)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.SparkJob other) { + if (other == com.google.cloud.dataproc.v1beta2.SparkJob.getDefaultInstance()) return this; + if (!other.args_.isEmpty()) { + if (args_.isEmpty()) { + args_ = other.args_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureArgsIsMutable(); + args_.addAll(other.args_); + } + onChanged(); + } + if (!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + if (!other.fileUris_.isEmpty()) { + if (fileUris_.isEmpty()) { + fileUris_ = other.fileUris_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureFileUrisIsMutable(); + fileUris_.addAll(other.fileUris_); + } + onChanged(); + } + if (!other.archiveUris_.isEmpty()) { + if (archiveUris_.isEmpty()) { + archiveUris_ = other.archiveUris_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + 
ensureArchiveUrisIsMutable(); + archiveUris_.addAll(other.archiveUris_); + } + onChanged(); + } + internalGetMutableProperties().mergeFrom( + other.internalGetProperties()); + if (other.hasLoggingConfig()) { + mergeLoggingConfig(other.getLoggingConfig()); + } + switch (other.getDriverCase()) { + case MAIN_JAR_FILE_URI: { + driverCase_ = 1; + driver_ = other.driver_; + onChanged(); + break; + } + case MAIN_CLASS: { + driverCase_ = 2; + driver_ = other.driver_; + onChanged(); + break; + } + case DRIVER_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.SparkJob parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.SparkJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int driverCase_ = 0; + private java.lang.Object driver_; + public DriverCase + getDriverCase() { + return DriverCase.forNumber( + driverCase_); + } + + public Builder clearDriver() { + driverCase_ = 0; + driver_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + *
+     * The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public java.lang.String getMainJarFileUri() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 1) { + driver_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public com.google.protobuf.ByteString + getMainJarFileUriBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 1) { + ref = driver_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 1) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public Builder setMainJarFileUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + driverCase_ = 1; + driver_ = value; + onChanged(); + return this; + } + /** + *
+     * The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public Builder clearMainJarFileUri() { + if (driverCase_ == 1) { + driverCase_ = 0; + driver_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The HCFS URI of the jar file that contains the main class.
+     * 
+ * + * string main_jar_file_uri = 1; + */ + public Builder setMainJarFileUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + driverCase_ = 1; + driver_ = value; + onChanged(); + return this; + } + + /** + *
+     * The name of the driver's main class. The jar file that contains the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public java.lang.String getMainClass() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (driverCase_ == 2) { + driver_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The name of the driver's main class. The jar file that contains the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public com.google.protobuf.ByteString + getMainClassBytes() { + java.lang.Object ref = ""; + if (driverCase_ == 2) { + ref = driver_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (driverCase_ == 2) { + driver_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The name of the driver's main class. The jar file that contains the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public Builder setMainClass( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + driverCase_ = 2; + driver_ = value; + onChanged(); + return this; + } + /** + *
+     * The name of the driver's main class. The jar file that contains the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public Builder clearMainClass() { + if (driverCase_ == 2) { + driverCase_ = 0; + driver_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The name of the driver's main class. The jar file that contains the class
+     * must be in the default CLASSPATH or specified in `jar_file_uris`.
+     * 
+ * + * string main_class = 2; + */ + public Builder setMainClassBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + driverCase_ = 2; + driver_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureArgsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + args_ = new com.google.protobuf.LazyStringArrayList(args_); + bitField0_ |= 0x00000004; + } + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ProtocolStringList + getArgsList() { + return args_.getUnmodifiableView(); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public int getArgsCount() { + return args_.size(); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public com.google.protobuf.ByteString + getArgsBytes(int index) { + return args_.getByteString(index); + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder setArgs( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder addArgs( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder addAllArgs( + java.lang.Iterable values) { + ensureArgsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, args_); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder clearArgs() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + *
+     * Optional. The arguments to pass to the driver. Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 3; + */ + public Builder addArgsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000008; + } + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder setJarFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addJarFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addAllJarFileUris( + java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jarFileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+     * Spark driver and tasks.
+     * 
+ * + * repeated string jar_file_uris = 4; + */ + public Builder addJarFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureFileUrisIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(fileUris_); + bitField0_ |= 0x00000010; + } + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ProtocolStringList + getFileUrisList() { + return fileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public com.google.protobuf.ByteString + getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder setFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addAllFileUris( + java.lang.Iterable values) { + ensureFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, fileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder clearFileUris() { + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 5; + */ + public Builder addFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureArchiveUrisIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(archiveUris_); + bitField0_ |= 0x00000020; + } + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ProtocolStringList + getArchiveUrisList() { + return archiveUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public com.google.protobuf.ByteString + getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder setArchiveUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder addArchiveUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder addAllArchiveUris( + java.lang.Iterable values) { + ensureArchiveUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, archiveUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder clearArchiveUris() { + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory
+     * of Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 6; + */ + public Builder addArchiveUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged();; + if (properties_ == null) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
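[Editor's sketch] The accessors above are the standard generated surface for repeated string fields: single adds, bulk adds, indexed replacement, and byte-string variants that enforce UTF-8, with every mutator rejecting null. A minimal sketch of driving them from user code; every call below appears in this patch, but the bucket paths are hypothetical placeholders:

    import com.google.cloud.dataproc.v1beta2.SparkJob;
    import java.util.Arrays;

    public class SparkJobUrisExample {
      public static void main(String[] args) {
        // All gs:// paths are hypothetical, for illustration only.
        SparkJob job = SparkJob.newBuilder()
            .addJarFileUris("gs://my-bucket/libs/extra.jar")      // single add
            .addFileUris("gs://my-bucket/data/lookup.csv")
            .addAllArchiveUris(Arrays.asList(                     // bulk add
                "gs://my-bucket/archives/env.tar.gz",
                "gs://my-bucket/archives/site.zip"))
            .build();
        System.out.println(job.getArchiveUrisCount());            // 2
        System.out.println(job.getFileUrisList());                // read-only list
      }
    }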
+     * Optional. A mapping of property names to values, used to configure Spark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Spark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Spark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Spark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Spark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public Builder removeProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Spark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + public Builder putProperties( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure Spark.
+     * Properties that conflict with values set by the Cloud Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 7; + */ + + public Builder putAllProperties( + java.util.Map values) { + internalGetMutableProperties().getMutableMap() + .putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> loggingConfigBuilder_; + /** + *
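[Editor's sketch] The map accessors generated for properties deliberately funnel all mutation through put/remove/clear rather than a mutable map: getProperties() is deprecated in favor of getPropertiesMap(), and getPropertiesOrThrow raises IllegalArgumentException for a missing key. A short sketch using only accessors shown above; the property keys are ordinary Spark settings chosen purely for illustration:

    import com.google.cloud.dataproc.v1beta2.SparkJob;

    public class SparkPropertiesExample {
      public static void main(String[] args) {
        SparkJob job = SparkJob.newBuilder()
            // Illustrative keys; any Spark property name works here.
            .putProperties("spark.executor.memory", "4g")
            .putProperties("spark.dynamicAllocation.enabled", "true")
            .build();

        // Prefer the OrDefault lookup over getPropertiesOrThrow.
        String memory = job.getPropertiesOrDefault("spark.executor.memory", "2g");
        System.out.println(memory);                                    // 4g
        System.out.println(job.containsProperties("spark.eventLog.dir")); // false
      }
    }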
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public boolean hasLoggingConfig() { + return loggingConfigBuilder_ != null || loggingConfig_ != null; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + if (loggingConfigBuilder_ == null) { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } else { + return loggingConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder setLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + loggingConfig_ = value; + onChanged(); + } else { + loggingConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder setLoggingConfig( + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder builderForValue) { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = builderForValue.build(); + onChanged(); + } else { + loggingConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder mergeLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (loggingConfig_ != null) { + loggingConfig_ = + com.google.cloud.dataproc.v1beta2.LoggingConfig.newBuilder(loggingConfig_).mergeFrom(value).buildPartial(); + } else { + loggingConfig_ = value; + } + onChanged(); + } else { + loggingConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public Builder clearLoggingConfig() { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + onChanged(); + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder getLoggingConfigBuilder() { + + onChanged(); + return getLoggingConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + if (loggingConfigBuilder_ != null) { + return loggingConfigBuilder_.getMessageOrBuilder(); + } else { + return loggingConfig_ == null ? + com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> + getLoggingConfigFieldBuilder() { + if (loggingConfigBuilder_ == null) { + loggingConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder>( + getLoggingConfig(), + getParentForChildren(), + isClean()); + loggingConfig_ = null; + } + return loggingConfigBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.SparkJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkJob) + private static final com.google.cloud.dataproc.v1beta2.SparkJob DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.SparkJob(); + } + + public static com.google.cloud.dataproc.v1beta2.SparkJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SparkJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SparkJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJobOrBuilder.java new file mode 100644 index 000000000000..48535957b65a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJobOrBuilder.java @@ -0,0 +1,307 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface SparkJobOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.SparkJob) + com.google.protobuf.MessageOrBuilder { + + /** + *
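[Editor's sketch] The logging_config accessors that close out the builder above follow the singular-message pattern: set replaces the value wholesale, mergeLoggingConfig folds new fields into the existing one, and getLoggingConfigBuilder lazily materializes a nested builder. A sketch of the set-versus-merge distinction; it assumes LoggingConfig (defined elsewhere in this patch) exposes putDriverLogLevels and a Level enum for its driver_log_levels map, which is an assumption, not something shown in this hunk:

    import com.google.cloud.dataproc.v1beta2.LoggingConfig;
    import com.google.cloud.dataproc.v1beta2.SparkJob;

    public class LoggingConfigExample {
      public static void main(String[] args) {
        // Assumed API: putDriverLogLevels(String, LoggingConfig.Level).
        LoggingConfig base = LoggingConfig.newBuilder()
            .putDriverLogLevels("root", LoggingConfig.Level.INFO)
            .build();

        SparkJob.Builder job = SparkJob.newBuilder().setLoggingConfig(base);

        // merge overlays onto the existing config, so both entries survive;
        // a second setLoggingConfig would have discarded the "root" entry.
        job.mergeLoggingConfig(LoggingConfig.newBuilder()
            .putDriverLogLevels("org.apache.spark", LoggingConfig.Level.DEBUG)
            .build());

        System.out.println(job.getLoggingConfig().getDriverLogLevelsMap());
      }
    }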
+   * The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1; + */ + java.lang.String getMainJarFileUri(); + /** + *
+   * The HCFS URI of the jar file that contains the main class.
+   * 
+ * + * string main_jar_file_uri = 1; + */ + com.google.protobuf.ByteString + getMainJarFileUriBytes(); + + /** + *
+   * The name of the driver's main class. The jar file that contains the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2; + */ + java.lang.String getMainClass(); + /** + *
+   * The name of the driver's main class. The jar file that contains the class
+   * must be in the default CLASSPATH or specified in `jar_file_uris`.
+   * 
+ * + * string main_class = 2; + */ + com.google.protobuf.ByteString + getMainClassBytes(); + + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 3; + */ + java.util.List + getArgsList(); + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 3; + */ + int getArgsCount(); + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 3; + */ + java.lang.String getArgs(int index); + /** + *
+   * Optional. The arguments to pass to the driver. Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 3; + */ + com.google.protobuf.ByteString + getArgsBytes(int index); + + /** + *
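[Editor's sketch] The warning in the args documentation is worth making concrete: flags such as --conf that map onto Spark properties should travel through the properties map rather than args, because they can collide with settings the service injects into its own job submission. The interface here is read-only; the sketch uses the concrete builder, with hypothetical class and path names:

    import com.google.cloud.dataproc.v1beta2.SparkJob;

    public class ArgsVsPropertiesExample {
      public static void main(String[] args) {
        SparkJob job = SparkJob.newBuilder()
            .setMainClass("com.example.MyDriver")          // hypothetical driver
            // Application arguments belong in args...
            .addArgs("--input=gs://my-bucket/in")          // hypothetical paths
            .addArgs("--output=gs://my-bucket/out")
            // ...but Spark settings go through properties, never "--conf".
            .putProperties("spark.sql.shuffle.partitions", "64")
            .build();
        System.out.println(job.getArgsList());
      }
    }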
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + java.util.List + getJarFileUrisList(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + int getJarFileUrisCount(); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + java.lang.String getJarFileUris(int index); + /** + *
+   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+   * Spark driver and tasks.
+   * 
+ * + * repeated string jar_file_uris = 4; + */ + com.google.protobuf.ByteString + getJarFileUrisBytes(int index); + + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + java.util.List + getFileUrisList(); + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + int getFileUrisCount(); + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + java.lang.String getFileUris(int index); + /** + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * Spark drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 5; + */ + com.google.protobuf.ByteString + getFileUrisBytes(int index); + + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + java.util.List + getArchiveUrisList(); + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + int getArchiveUrisCount(); + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + java.lang.String getArchiveUris(int index); + /** + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory
+   * of Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 6; + */ + com.google.protobuf.ByteString + getArchiveUrisBytes(int index); + + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + int getPropertiesCount(); + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + boolean containsProperties( + java.lang.String key); + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getProperties(); + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + java.util.Map + getPropertiesMap(); + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. A mapping of property names to values, used to configure Spark.
+   * Properties that conflict with values set by the Cloud Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 7; + */ + + java.lang.String getPropertiesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + boolean hasLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 8; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder(); + + public com.google.cloud.dataproc.v1beta2.SparkJob.DriverCase getDriverCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java new file mode 100644 index 000000000000..2e16f9fb036e --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java @@ -0,0 +1,2021 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
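[Editor's sketch] The interface ends with getDriverCase(), the discriminator for SparkJob's driver oneof: a job carries either main_jar_file_uri or main_class, and setting one arm clears the other. A sketch of consuming it, assuming the generated case names MAIN_JAR_FILE_URI, MAIN_CLASS, and DRIVER_NOT_SET, which follows the same upper-cased-field-name convention as SparkSqlJob's QueriesCase below:

    import com.google.cloud.dataproc.v1beta2.SparkJob;

    public class DriverCaseExample {
      static String describeDriver(SparkJob job) {
        switch (job.getDriverCase()) {
          case MAIN_JAR_FILE_URI:
            return "jar: " + job.getMainJarFileUri();
          case MAIN_CLASS:
            return "class: " + job.getMainClass();
          case DRIVER_NOT_SET:
          default:
            return "no driver configured";
        }
      }

      public static void main(String[] args) {
        SparkJob job = SparkJob.newBuilder()
            .setMainClass("com.example.MyDriver")   // hypothetical class name
            .build();
        System.out.println(describeDriver(job));    // class: com.example.MyDriver
      }
    }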
+ * A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
+ * queries.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SparkSqlJob} + */ +public final class SparkSqlJob extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.SparkSqlJob) + SparkSqlJobOrBuilder { +private static final long serialVersionUID = 0L; + // Use SparkSqlJob.newBuilder() to construct. + private SparkSqlJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SparkSqlJob() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SparkSqlJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + queriesCase_ = 1; + queries_ = s; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.QueryList.Builder subBuilder = null; + if (queriesCase_ == 2) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.QueryList) queries_).toBuilder(); + } + queries_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.QueryList.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.QueryList) queries_); + queries_ = subBuilder.buildPartial(); + } + queriesCase_ = 2; + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + scriptVariables_ = com.google.protobuf.MapField.newMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000004; + } + com.google.protobuf.MapEntry + scriptVariables__ = input.readMessage( + ScriptVariablesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + scriptVariables_.getMutableMap().put( + scriptVariables__.getKey(), scriptVariables__.getValue()); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000008; + } + com.google.protobuf.MapEntry + properties__ = input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + properties_.getMutableMap().put( + properties__.getKey(), properties__.getValue()); + break; + } + case 50: { + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder subBuilder = null; + if (loggingConfig_ != null) { + subBuilder = loggingConfig_.toBuilder(); + } + loggingConfig_ = input.readMessage(com.google.cloud.dataproc.v1beta2.LoggingConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loggingConfig_); + loggingConfig_ = subBuilder.buildPartial(); + } + + break; + } + case 450: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000010; + } + jarFileUris_.add(s); + break; + } + default: { + 
if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 3: + return internalGetScriptVariables(); + case 4: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SparkSqlJob.class, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder.class); + } + + private int bitField0_; + private int queriesCase_ = 0; + private java.lang.Object queries_; + public enum QueriesCase + implements com.google.protobuf.Internal.EnumLite { + QUERY_FILE_URI(1), + QUERY_LIST(2), + QUERIES_NOT_SET(0); + private final int value; + private QueriesCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static QueriesCase valueOf(int value) { + return forNumber(value); + } + + public static QueriesCase forNumber(int value) { + switch (value) { + case 1: return QUERY_FILE_URI; + case 2: return QUERY_LIST; + case 0: return QUERIES_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public QueriesCase + getQueriesCase() { + return QueriesCase.forNumber( + queriesCase_); + } + + public static final int QUERY_FILE_URI_FIELD_NUMBER = 1; + /** + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (queriesCase_ == 1) { + queries_ = s; + } + return s; + } + } + /** + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + */ + public com.google.protobuf.ByteString + getQueryFileUriBytes() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (queriesCase_ == 1) { + queries_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int QUERY_LIST_FIELD_NUMBER = 2; + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public boolean hasQueryList() { + return queriesCase_ == 2; + } + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList getQueryList() { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder() { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + + public static final int SCRIPT_VARIABLES_FIELD_NUMBER = 3; + private static final class ScriptVariablesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_ScriptVariablesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> scriptVariables_; + private com.google.protobuf.MapField + internalGetScriptVariables() { + if (scriptVariables_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + return scriptVariables_; + } + + public int getScriptVariablesCount() { + return internalGetScriptVariables().getMap().size(); + } + /** + *
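[Editor's sketch] As with SparkJob's driver, queries is a oneof: a SparkSqlJob carries either a query_file_uri or an inline QueryList, never both, and hasQueryList() is just a check on the discriminator. A sketch of both arms; it assumes QueryList (defined elsewhere in this patch) exposes the standard addQueries accessor for its repeated queries field, and the SQL text and URI are illustrative:

    import com.google.cloud.dataproc.v1beta2.QueryList;
    import com.google.cloud.dataproc.v1beta2.SparkSqlJob;

    public class SparkSqlQueriesExample {
      public static void main(String[] args) {
        SparkSqlJob job = SparkSqlJob.newBuilder()
            .setQueryList(QueryList.newBuilder()
                .addQueries("SHOW DATABASES;")
                .addQueries("SELECT COUNT(*) FROM logs;"))  // illustrative SQL
            .build();

        System.out.println(job.hasQueryList());             // true
        System.out.println(job.getQueriesCase());           // QUERY_LIST

        // Setting the other arm of the oneof discards the QueryList:
        SparkSqlJob fileJob = job.toBuilder()
            .setQueryFileUri("gs://my-bucket/queries.sql")  // hypothetical URI
            .build();
        System.out.println(fileJob.hasQueryList());         // false
      }
    }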
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + + public boolean containsScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetScriptVariables().getMap().containsKey(key); + } + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getScriptVariables() { + return getScriptVariablesMap(); + } + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + + public java.util.Map getScriptVariablesMap() { + return internalGetScriptVariables().getMap(); + } + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + + public java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + + public java.lang.String getScriptVariablesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int PROPERTIES_FIELD_NUMBER = 4; + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
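[Editor's sketch] script_variables is a second string-to-string map with the same generated surface as properties; each entry behaves like a Spark SQL SET name="value"; executed before the queries run. A brief sketch; the builder-side putScriptVariables mutator is not shown in this hunk, but map fields conventionally get it, so treat that call as an assumption:

    import com.google.cloud.dataproc.v1beta2.SparkSqlJob;

    public class ScriptVariablesExample {
      public static void main(String[] args) {
        SparkSqlJob job = SparkSqlJob.newBuilder()
            // Equivalent to running: SET env="prod"; SET dt="2018-08-01";
            .putScriptVariables("env", "prod")
            .putScriptVariables("dt", "2018-08-01")
            .build();

        System.out.println(job.getScriptVariablesCount());                 // 2
        System.out.println(job.getScriptVariablesOrDefault("env", "dev")); // prod
      }
    }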
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int JAR_FILE_URIS_FIELD_NUMBER = 56; + private com.google.protobuf.LazyStringList jarFileUris_; + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_; + } + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + + public static final int LOGGING_CONFIG_FIELD_NUMBER = 6; + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_; + /** + *
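[Editor's sketch] Note the unusual field number: jar_file_uris sits at 56, so unlike fields 1 through 6 its wire tag no longer fits in a single varint byte. The tag value is (56 << 3) | 2 = 450, which is exactly the case label the parsing constructor above matches, and why getSerializedSize() further down charges 2 bytes of tag overhead per list entry. A quick check of that arithmetic:

    public class TagArithmetic {
      public static void main(String[] args) {
        int fieldNumber = 56;
        int wireTypeLengthDelimited = 2;    // strings are length-delimited
        int tag = (fieldNumber << 3) | wireTypeLengthDelimited;
        System.out.println(tag);            // 450, the parser's case label
        // Varints carry 7 payload bits per byte, so values >= 128 need 2 bytes.
        System.out.println(tag >= 128 ? "2-byte tag" : "1-byte tag");
      }
    }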
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public boolean hasLoggingConfig() { + return loggingConfig_ != null; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + return getLoggingConfig(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (queriesCase_ == 1) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, queries_); + } + if (queriesCase_ == 2) { + output.writeMessage(2, (com.google.cloud.dataproc.v1beta2.QueryList) queries_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetScriptVariables(), + ScriptVariablesDefaultEntryHolder.defaultEntry, + 3); + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetProperties(), + PropertiesDefaultEntryHolder.defaultEntry, + 4); + if (loggingConfig_ != null) { + output.writeMessage(6, getLoggingConfig()); + } + for (int i = 0; i < jarFileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 56, jarFileUris_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (queriesCase_ == 1) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, queries_); + } + if (queriesCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, (com.google.cloud.dataproc.v1beta2.QueryList) queries_); + } + for (java.util.Map.Entry entry + : internalGetScriptVariables().getMap().entrySet()) { + com.google.protobuf.MapEntry + scriptVariables__ = ScriptVariablesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, scriptVariables__); + } + for (java.util.Map.Entry entry + : internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry + properties__ = PropertiesDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, properties__); + } + if (loggingConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, getLoggingConfig()); + } + { + int dataSize = 0; + for (int i = 0; i < jarFileUris_.size(); i++) { + dataSize += computeStringSizeNoTag(jarFileUris_.getRaw(i)); + } + size += dataSize; + size += 2 * getJarFileUrisList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.SparkSqlJob)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.SparkSqlJob other = (com.google.cloud.dataproc.v1beta2.SparkSqlJob) obj; + + boolean result = true; + result = result && internalGetScriptVariables().equals( + other.internalGetScriptVariables()); + result = result && internalGetProperties().equals( + other.internalGetProperties()); + result = result && getJarFileUrisList() 
+ .equals(other.getJarFileUrisList()); + result = result && (hasLoggingConfig() == other.hasLoggingConfig()); + if (hasLoggingConfig()) { + result = result && getLoggingConfig() + .equals(other.getLoggingConfig()); + } + result = result && getQueriesCase().equals( + other.getQueriesCase()); + if (!result) return false; + switch (queriesCase_) { + case 1: + result = result && getQueryFileUri() + .equals(other.getQueryFileUri()); + break; + case 2: + result = result && getQueryList() + .equals(other.getQueryList()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (!internalGetScriptVariables().getMap().isEmpty()) { + hash = (37 * hash) + SCRIPT_VARIABLES_FIELD_NUMBER; + hash = (53 * hash) + internalGetScriptVariables().hashCode(); + } + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + if (getJarFileUrisCount() > 0) { + hash = (37 * hash) + JAR_FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getJarFileUrisList().hashCode(); + } + if (hasLoggingConfig()) { + hash = (37 * hash) + LOGGING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLoggingConfig().hashCode(); + } + switch (queriesCase_) { + case 1: + hash = (37 * hash) + QUERY_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getQueryFileUri().hashCode(); + break; + case 2: + hash = (37 * hash) + QUERY_LIST_FIELD_NUMBER; + hash = (53 * hash) + getQueryList().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static 
com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.SparkSqlJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
+   * queries.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SparkSqlJob} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.SparkSqlJob) + com.google.cloud.dataproc.v1beta2.SparkSqlJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 3: + return internalGetScriptVariables(); + case 4: + return internalGetProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 3: + return internalGetMutableScriptVariables(); + case 4: + return internalGetMutableProperties(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SparkSqlJob.class, com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + internalGetMutableScriptVariables().clear(); + internalGetMutableProperties().clear(); + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + queriesCase_ = 0; + queries_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SparkSqlJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkSqlJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkSqlJob build() { + com.google.cloud.dataproc.v1beta2.SparkSqlJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkSqlJob buildPartial() { + com.google.cloud.dataproc.v1beta2.SparkSqlJob result = new com.google.cloud.dataproc.v1beta2.SparkSqlJob(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (queriesCase_ == 1) { + result.queries_ = queries_; + } + if (queriesCase_ == 2) { + if 
(queryListBuilder_ == null) { + result.queries_ = queries_; + } else { + result.queries_ = queryListBuilder_.build(); + } + } + result.scriptVariables_ = internalGetScriptVariables(); + result.scriptVariables_.makeImmutable(); + result.properties_ = internalGetProperties(); + result.properties_.makeImmutable(); + if (((bitField0_ & 0x00000010) == 0x00000010)) { + jarFileUris_ = jarFileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.jarFileUris_ = jarFileUris_; + if (loggingConfigBuilder_ == null) { + result.loggingConfig_ = loggingConfig_; + } else { + result.loggingConfig_ = loggingConfigBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + result.queriesCase_ = queriesCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.SparkSqlJob) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.SparkSqlJob)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.SparkSqlJob other) { + if (other == com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance()) return this; + internalGetMutableScriptVariables().mergeFrom( + other.internalGetScriptVariables()); + internalGetMutableProperties().mergeFrom( + other.internalGetProperties()); + if (!other.jarFileUris_.isEmpty()) { + if (jarFileUris_.isEmpty()) { + jarFileUris_ = other.jarFileUris_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureJarFileUrisIsMutable(); + jarFileUris_.addAll(other.jarFileUris_); + } + onChanged(); + } + if (other.hasLoggingConfig()) { + mergeLoggingConfig(other.getLoggingConfig()); + } + switch (other.getQueriesCase()) { + case QUERY_FILE_URI: { + queriesCase_ = 1; + queries_ = other.queries_; + onChanged(); + break; + } + case QUERY_LIST: { + mergeQueryList(other.getQueryList()); + break; + } + case QUERIES_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.SparkSqlJob parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.SparkSqlJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int queriesCase_ = 0; + private java.lang.Object queries_; + public QueriesCase + getQueriesCase() { + return QueriesCase.forNumber( + queriesCase_); + } + + public Builder clearQueries() { + queriesCase_ = 0; + queries_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (queriesCase_ == 1) { + queries_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public com.google.protobuf.ByteString + getQueryFileUriBytes() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (queriesCase_ == 1) { + queries_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
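+     * <p>Example (an illustrative sketch, not generated output; the bucket
+     * and script names are placeholders). Because this field is part of the
+     * `queries` oneof, setting it discards any previously set `query_list`:
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1beta2.SparkSqlJob job =
+     *     com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder()
+     *         .setQueryFileUri("gs://my-bucket/scripts/etl.sql")
+     *         .build();
+     * }</pre>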
+ * + * string query_file_uri = 1; + */ + public Builder setQueryFileUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + /** + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder clearQueryFileUri() { + if (queriesCase_ == 1) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + */ + public Builder setQueryFileUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder> queryListBuilder_; + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public boolean hasQueryList() { + return queriesCase_ == 2; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList getQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } else { + if (queriesCase_ == 2) { + return queryListBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + } + /** + *
+     * A list of queries.
+     * 
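+     * <p>Example (an illustrative sketch; the query text is a placeholder).
+     * As the other member of the `queries` oneof, this replaces any
+     * previously set `query_file_uri`:
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1beta2.SparkSqlJob job =
+     *     com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder()
+     *         .setQueryList(
+     *             com.google.cloud.dataproc.v1beta2.QueryList.newBuilder()
+     *                 .addQueries("SHOW DATABASES;")
+     *                 .build())
+     *         .build();
+     * }</pre>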
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder setQueryList(com.google.cloud.dataproc.v1beta2.QueryList value) { + if (queryListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queries_ = value; + onChanged(); + } else { + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder setQueryList( + com.google.cloud.dataproc.v1beta2.QueryList.Builder builderForValue) { + if (queryListBuilder_ == null) { + queries_ = builderForValue.build(); + onChanged(); + } else { + queryListBuilder_.setMessage(builderForValue.build()); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder mergeQueryList(com.google.cloud.dataproc.v1beta2.QueryList value) { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2 && + queries_ != com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance()) { + queries_ = com.google.cloud.dataproc.v1beta2.QueryList.newBuilder((com.google.cloud.dataproc.v1beta2.QueryList) queries_) + .mergeFrom(value).buildPartial(); + } else { + queries_ = value; + } + onChanged(); + } else { + if (queriesCase_ == 2) { + queryListBuilder_.mergeFrom(value); + } + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public Builder clearQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + } else { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + } + queryListBuilder_.clear(); + } + return this; + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryList.Builder getQueryListBuilder() { + return getQueryListFieldBuilder().getBuilder(); + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder() { + if ((queriesCase_ == 2) && (queryListBuilder_ != null)) { + return queryListBuilder_.getMessageOrBuilder(); + } else { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.QueryList) queries_; + } + return com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + } + /** + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder> + getQueryListFieldBuilder() { + if (queryListBuilder_ == null) { + if (!(queriesCase_ == 2)) { + queries_ = com.google.cloud.dataproc.v1beta2.QueryList.getDefaultInstance(); + } + queryListBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.QueryList, com.google.cloud.dataproc.v1beta2.QueryList.Builder, com.google.cloud.dataproc.v1beta2.QueryListOrBuilder>( + (com.google.cloud.dataproc.v1beta2.QueryList) queries_, + getParentForChildren(), + isClean()); + queries_ = null; + } + queriesCase_ = 2; + onChanged();; + return queryListBuilder_; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> scriptVariables_; + private com.google.protobuf.MapField + internalGetScriptVariables() { + if (scriptVariables_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + return scriptVariables_; + } + private com.google.protobuf.MapField + internalGetMutableScriptVariables() { + onChanged();; + if (scriptVariables_ == null) { + scriptVariables_ = com.google.protobuf.MapField.newMapField( + ScriptVariablesDefaultEntryHolder.defaultEntry); + } + if (!scriptVariables_.isMutable()) { + scriptVariables_ = scriptVariables_.copy(); + } + return scriptVariables_; + } + + public int getScriptVariablesCount() { + return internalGetScriptVariables().getMap().size(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: SET `name="value";`).
+     * 
+ * + * map<string, string> script_variables = 3; + */ + + public boolean containsScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetScriptVariables().getMap().containsKey(key); + } + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getScriptVariables() { + return getScriptVariablesMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: SET `name="value";`).
+     * 
+ * + * map<string, string> script_variables = 3; + */ + + public java.util.Map getScriptVariablesMap() { + return internalGetScriptVariables().getMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: SET `name="value";`).
+     * 
+ * + * map<string, string> script_variables = 3; + */ + + public java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: SET `name="value";`).
+     * 
+ * + * map<string, string> script_variables = 3; + */ + + public java.lang.String getScriptVariablesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetScriptVariables().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearScriptVariables() { + internalGetMutableScriptVariables().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: SET `name="value";`).
+     * 
+ * + * map<string, string> script_variables = 3; + */ + + public Builder removeScriptVariables( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableScriptVariables().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableScriptVariables() { + return internalGetMutableScriptVariables().getMutableMap(); + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: SET `name="value";`).
+     * 
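+     * <p>Example (an illustrative sketch; the variable name and value are
+     * placeholders). The entry below has the same effect as running
+     * SET `env="prod";` ahead of the queries:
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder builder =
+     *     com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder();
+     * builder.putScriptVariables("env", "prod");
+     * }</pre>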
+ * + * map<string, string> script_variables = 3; + */ + public Builder putScriptVariables( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableScriptVariables().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. Mapping of query variable names to values (equivalent to the
+     * Spark SQL command: SET `name="value";`).
+     * 
+ * + * map<string, string> script_variables = 3; + */ + + public Builder putAllScriptVariables( + java.util.Map values) { + internalGetMutableScriptVariables().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> properties_; + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged();; + if (properties_ == null) { + properties_ = com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure
+     * Spark SQL's SparkConf. Properties that conflict with values set by the
+     * Cloud Dataproc API may be overwritten.
+     * 
+ * + * map<string, string> properties = 4; + */ + + public boolean containsProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetProperties().getMap().containsKey(key); + } + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure
+     * Spark SQL's SparkConf. Properties that conflict with values set by the
+     * Cloud Dataproc API may be overwritten.
+     * 
+ * + * map<string, string> properties = 4; + */ + + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure
+     * Spark SQL's SparkConf. Properties that conflict with values set by the
+     * Cloud Dataproc API may be overwritten.
+     * 
+ * + * map<string, string> properties = 4; + */ + + public java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure
+     * Spark SQL's SparkConf. Properties that conflict with values set by the
+     * Cloud Dataproc API may be overwritten.
+     * 
+ * + * map<string, string> properties = 4; + */ + + public java.lang.String getPropertiesOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure
+     * Spark SQL's SparkConf. Properties that conflict with values set by the
+     * Cloud Dataproc API may be overwritten.
+     * 
+ * + * map<string, string> properties = 4; + */ + + public Builder removeProperties( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + *
+     * Optional. A mapping of property names to values, used to configure
+     * Spark SQL's SparkConf. Properties that conflict with values set by the
+     * Cloud Dataproc API may be overwritten.
+     * 
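+     * <p>Example (an illustrative sketch; the key shown is a standard Spark
+     * SQL property, chosen here only for illustration):
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder builder =
+     *     com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder();
+     * builder.putProperties("spark.sql.shuffle.partitions", "200");
+     * }</pre>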
+ * + * map<string, string> properties = 4; + */ + public Builder putProperties( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableProperties().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. A mapping of property names to values, used to configure
+     * Spark SQL's SparkConf. Properties that conflict with values set by the
+     * Cloud Dataproc API may be overwritten.
+     * 
+ * + * map<string, string> properties = 4; + */ + + public Builder putAllProperties( + java.util.Map values) { + internalGetMutableProperties().getMutableMap() + .putAll(values); + return this; + } + + private com.google.protobuf.LazyStringList jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureJarFileUrisIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + jarFileUris_ = new com.google.protobuf.LazyStringArrayList(jarFileUris_); + bitField0_ |= 0x00000010; + } + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public com.google.protobuf.ProtocolStringList + getJarFileUrisList() { + return jarFileUris_.getUnmodifiableView(); + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public int getJarFileUrisCount() { + return jarFileUris_.size(); + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public java.lang.String getJarFileUris(int index) { + return jarFileUris_.get(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public com.google.protobuf.ByteString + getJarFileUrisBytes(int index) { + return jarFileUris_.getByteString(index); + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public Builder setJarFileUris( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
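+     * <p>Example (an illustrative sketch; the jar URI is a placeholder):
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder builder =
+     *     com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder();
+     * builder.addJarFileUris("gs://my-bucket/lib/custom-udfs.jar");
+     * }</pre>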
+ * + * repeated string jar_file_uris = 56; + */ + public Builder addJarFileUris( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public Builder addAllJarFileUris( + java.lang.Iterable values) { + ensureJarFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jarFileUris_); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public Builder clearJarFileUris() { + jarFileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + /** + *
+     * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+     * 
+ * + * repeated string jar_file_uris = 56; + */ + public Builder addJarFileUrisBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureJarFileUrisIsMutable(); + jarFileUris_.add(value); + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.LoggingConfig loggingConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> loggingConfigBuilder_; + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public boolean hasLoggingConfig() { + return loggingConfigBuilder_ != null || loggingConfig_ != null; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig() { + if (loggingConfigBuilder_ == null) { + return loggingConfig_ == null ? com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } else { + return loggingConfigBuilder_.getMessage(); + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
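+     * <p>Example (an illustrative sketch, assuming the generated
+     * `putDriverLogLevels` map accessor on {@code LoggingConfig.Builder}):
+     * <pre>{@code
+     * com.google.cloud.dataproc.v1beta2.SparkSqlJob.Builder builder =
+     *     com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder();
+     * builder.setLoggingConfig(
+     *     com.google.cloud.dataproc.v1beta2.LoggingConfig.newBuilder()
+     *         .putDriverLogLevels(
+     *             "root",
+     *             com.google.cloud.dataproc.v1beta2.LoggingConfig.Level.INFO)
+     *         .build());
+     * }</pre>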
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public Builder setLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + loggingConfig_ = value; + onChanged(); + } else { + loggingConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public Builder setLoggingConfig( + com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder builderForValue) { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = builderForValue.build(); + onChanged(); + } else { + loggingConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public Builder mergeLoggingConfig(com.google.cloud.dataproc.v1beta2.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (loggingConfig_ != null) { + loggingConfig_ = + com.google.cloud.dataproc.v1beta2.LoggingConfig.newBuilder(loggingConfig_).mergeFrom(value).buildPartial(); + } else { + loggingConfig_ = value; + } + onChanged(); + } else { + loggingConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public Builder clearLoggingConfig() { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + onChanged(); + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder getLoggingConfigBuilder() { + + onChanged(); + return getLoggingConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + public com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + if (loggingConfigBuilder_ != null) { + return loggingConfigBuilder_.getMessageOrBuilder(); + } else { + return loggingConfig_ == null ? + com.google.cloud.dataproc.v1beta2.LoggingConfig.getDefaultInstance() : loggingConfig_; + } + } + /** + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder> + getLoggingConfigFieldBuilder() { + if (loggingConfigBuilder_ == null) { + loggingConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.LoggingConfig, com.google.cloud.dataproc.v1beta2.LoggingConfig.Builder, com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder>( + getLoggingConfig(), + getParentForChildren(), + isClean()); + loggingConfig_ = null; + } + return loggingConfigBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.SparkSqlJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkSqlJob) + private static final com.google.cloud.dataproc.v1beta2.SparkSqlJob DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.SparkSqlJob(); + } + + public static com.google.cloud.dataproc.v1beta2.SparkSqlJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SparkSqlJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SparkSqlJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SparkSqlJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJobOrBuilder.java new file mode 100644 index 000000000000..0637bd247a06 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJobOrBuilder.java @@ -0,0 +1,237 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface SparkSqlJobOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.SparkSqlJob) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + */ + java.lang.String getQueryFileUri(); + /** + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + */ + com.google.protobuf.ByteString + getQueryFileUriBytes(); + + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + boolean hasQueryList(); + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + com.google.cloud.dataproc.v1beta2.QueryList getQueryList(); + /** + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.QueryList query_list = 2; + */ + com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilder(); + + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + int getScriptVariablesCount(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + boolean containsScriptVariables( + java.lang.String key); + /** + * Use {@link #getScriptVariablesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getScriptVariables(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + java.util.Map + getScriptVariablesMap(); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
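+   * <p>Example (an illustrative sketch; the key and fallback value are
+   * placeholders). When the map has no entry for the key, the fallback is
+   * returned:
+   * <pre>{@code
+   * com.google.cloud.dataproc.v1beta2.SparkSqlJob job =
+   *     com.google.cloud.dataproc.v1beta2.SparkSqlJob.getDefaultInstance();
+   * // No entry for "env" on the default instance, so this yields "dev".
+   * java.lang.String env = job.getScriptVariablesOrDefault("env", "dev");
+   * }</pre>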
+ * + * map<string, string> script_variables = 3; + */ + + java.lang.String getScriptVariablesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. Mapping of query variable names to values (equivalent to the
+   * Spark SQL command: SET `name="value";`).
+   * 
+ * + * map<string, string> script_variables = 3; + */ + + java.lang.String getScriptVariablesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + int getPropertiesCount(); + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + boolean containsProperties( + java.lang.String key); + /** + * Use {@link #getPropertiesMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getProperties(); + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + java.util.Map + getPropertiesMap(); + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + + java.lang.String getPropertiesOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. A mapping of property names to values, used to configure
+   * Spark SQL's SparkConf. Properties that conflict with values set by the
+   * Cloud Dataproc API may be overwritten.
+   * 
+ * + * map<string, string> properties = 4; + */ + + java.lang.String getPropertiesOrThrow( + java.lang.String key); + + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + java.util.List + getJarFileUrisList(); + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + int getJarFileUrisCount(); + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + java.lang.String getJarFileUris(int index); + /** + *
+   * Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+   * 
+ * + * repeated string jar_file_uris = 56; + */ + com.google.protobuf.ByteString + getJarFileUrisBytes(int index); + + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + boolean hasLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfig getLoggingConfig(); + /** + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.LoggingConfig logging_config = 6; + */ + com.google.cloud.dataproc.v1beta2.LoggingConfigOrBuilder getLoggingConfigOrBuilder(); + + public com.google.cloud.dataproc.v1beta2.SparkSqlJob.QueriesCase getQueriesCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java new file mode 100644 index 000000000000..341f27f258cd --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java @@ -0,0 +1,1179 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to submit a job.
+ * 
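+ * <p>Example (an illustrative sketch; project, region, cluster, and bucket
+ * names are placeholders):
+ * <pre>{@code
+ * com.google.cloud.dataproc.v1beta2.SubmitJobRequest request =
+ *     com.google.cloud.dataproc.v1beta2.SubmitJobRequest.newBuilder()
+ *         .setProjectId("my-project")
+ *         .setRegion("us-central1")
+ *         .setJob(
+ *             com.google.cloud.dataproc.v1beta2.Job.newBuilder()
+ *                 .setPlacement(
+ *                     com.google.cloud.dataproc.v1beta2.JobPlacement.newBuilder()
+ *                         .setClusterName("my-cluster"))
+ *                 .setSparkSqlJob(
+ *                     com.google.cloud.dataproc.v1beta2.SparkSqlJob.newBuilder()
+ *                         .setQueryFileUri("gs://my-bucket/etl.sql")))
+ *         .setRequestId(java.util.UUID.randomUUID().toString())
+ *         .build();
+ * }</pre>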
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SubmitJobRequest} + */ +public final class SubmitJobRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.SubmitJobRequest) + SubmitJobRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use SubmitJobRequest.newBuilder() to construct. + private SubmitJobRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SubmitJobRequest() { + projectId_ = ""; + region_ = ""; + requestId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SubmitJobRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.Job.Builder subBuilder = null; + if (job_ != null) { + subBuilder = job_.toBuilder(); + } + job_ = input.readMessage(com.google.cloud.dataproc.v1beta2.Job.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(job_); + job_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + + requestId_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SubmitJobRequest.class, com.google.cloud.dataproc.v1beta2.SubmitJobRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 3; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int JOB_FIELD_NUMBER = 2; + private com.google.cloud.dataproc.v1beta2.Job job_; + /** + *
+   * Required. The job resource.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public boolean hasJob() { + return job_ != null; + } + /** + *
+   * Required. The job resource.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public com.google.cloud.dataproc.v1beta2.Job getJob() { + return job_ == null ? com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance() : job_; + } + /** + *
+   * Required. The job resource.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder() { + return getJob(); + } + + public static final int REQUEST_ID_FIELD_NUMBER = 4; + private volatile java.lang.Object requestId_; + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
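+   * <p>Example (an illustrative sketch) of supplying the recommended UUID
+   * value through the builder:
+   * <pre>{@code
+   * com.google.cloud.dataproc.v1beta2.SubmitJobRequest.Builder builder =
+   *     com.google.cloud.dataproc.v1beta2.SubmitJobRequest.newBuilder();
+   * // A random UUID is 36 characters, within the 40-character limit.
+   * builder.setRequestId(java.util.UUID.randomUUID().toString());
+   * }</pre>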
+ * + * string request_id = 4; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (job_ != null) { + output.writeMessage(2, getJob()); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_); + } + if (!getRequestIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, requestId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (job_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getJob()); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_); + } + if (!getRequestIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, requestId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.SubmitJobRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.SubmitJobRequest other = (com.google.cloud.dataproc.v1beta2.SubmitJobRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && (hasJob() == other.hasJob()); + if (hasJob()) { + result = result && getJob() + .equals(other.getJob()); + } + result = result && getRequestId() + .equals(other.getRequestId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + if (hasJob()) { + hash = (37 * hash) + JOB_FIELD_NUMBER; + hash = (53 * hash) + getJob().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.SubmitJobRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to submit a job.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.SubmitJobRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.SubmitJobRequest) + com.google.cloud.dataproc.v1beta2.SubmitJobRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.SubmitJobRequest.class, com.google.cloud.dataproc.v1beta2.SubmitJobRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.SubmitJobRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + if (jobBuilder_ == null) { + job_ = null; + } else { + job_ = null; + jobBuilder_ = null; + } + requestId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_SubmitJobRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SubmitJobRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.SubmitJobRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SubmitJobRequest build() { + com.google.cloud.dataproc.v1beta2.SubmitJobRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SubmitJobRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.SubmitJobRequest result = new com.google.cloud.dataproc.v1beta2.SubmitJobRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + if (jobBuilder_ == null) { + result.job_ = job_; + } else { + result.job_ = jobBuilder_.build(); + } + result.requestId_ = requestId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int 
index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.SubmitJobRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.SubmitJobRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.SubmitJobRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.SubmitJobRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (other.hasJob()) { + mergeJob(other.getJob()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.SubmitJobRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.SubmitJobRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 3; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.Job job_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder> jobBuilder_; + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public boolean hasJob() { + return jobBuilder_ != null || job_ != null; + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public com.google.cloud.dataproc.v1beta2.Job getJob() { + if (jobBuilder_ == null) { + return job_ == null ? com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance() : job_; + } else { + return jobBuilder_.getMessage(); + } + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public Builder setJob(com.google.cloud.dataproc.v1beta2.Job value) { + if (jobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + job_ = value; + onChanged(); + } else { + jobBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public Builder setJob( + com.google.cloud.dataproc.v1beta2.Job.Builder builderForValue) { + if (jobBuilder_ == null) { + job_ = builderForValue.build(); + onChanged(); + } else { + jobBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public Builder mergeJob(com.google.cloud.dataproc.v1beta2.Job value) { + if (jobBuilder_ == null) { + if (job_ != null) { + job_ = + com.google.cloud.dataproc.v1beta2.Job.newBuilder(job_).mergeFrom(value).buildPartial(); + } else { + job_ = value; + } + onChanged(); + } else { + jobBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public Builder clearJob() { + if (jobBuilder_ == null) { + job_ = null; + onChanged(); + } else { + job_ = null; + jobBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public com.google.cloud.dataproc.v1beta2.Job.Builder getJobBuilder() { + + onChanged(); + return getJobFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder() { + if (jobBuilder_ != null) { + return jobBuilder_.getMessageOrBuilder(); + } else { + return job_ == null ? + com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance() : job_; + } + } + /** + *
+     * Required. The job resource.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder> + getJobFieldBuilder() { + if (jobBuilder_ == null) { + jobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder>( + getJob(), + getParentForChildren(), + isClean()); + job_ = null; + } + return jobBuilder_; + } + + private java.lang.Object requestId_ = ""; + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
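+     * A minimal illustrative sketch of the recommended UUID pattern (the
+     * project id, region, and job values here are assumptions, not values
+     * prescribed by this API):
+     *
+     *     // Illustrative only: project/region values are placeholders.
+     *     SubmitJobRequest request = SubmitJobRequest.newBuilder()
+     *         .setProjectId("my-project")
+     *         .setRegion("us-central1")
+     *         .setJob(job)  // a previously built Job
+     *         .setRequestId(java.util.UUID.randomUUID().toString())
+     *         .build();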
+ * + * string request_id = 4; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public Builder setRequestId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + requestId_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public Builder clearRequestId() { + + requestId_ = getDefaultInstance().getRequestId(); + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 4; + */ + public Builder setRequestIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + requestId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.SubmitJobRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SubmitJobRequest) + private static final com.google.cloud.dataproc.v1beta2.SubmitJobRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.SubmitJobRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.SubmitJobRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SubmitJobRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SubmitJobRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.SubmitJobRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java new file mode 100644 index 000000000000..acaf6424cb57 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java @@ -0,0 +1,106 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface SubmitJobRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.SubmitJobRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 3; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The job resource.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + boolean hasJob(); + /** + *
+   * Required. The job resource.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + com.google.cloud.dataproc.v1beta2.Job getJob(); + /** + *
+   * Required. The job resource.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 2; + */ + com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder(); + + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4; + */ + java.lang.String getRequestId(); + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 4; + */ + com.google.protobuf.ByteString + getRequestIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java new file mode 100644 index 000000000000..c20d93d0efbe --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java @@ -0,0 +1,2404 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to update a cluster.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.UpdateClusterRequest} + */ +public final class UpdateClusterRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.UpdateClusterRequest) + UpdateClusterRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use UpdateClusterRequest.newBuilder() to construct. + private UpdateClusterRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private UpdateClusterRequest() { + projectId_ = ""; + region_ = ""; + clusterName_ = ""; + requestId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UpdateClusterRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 26: { + com.google.cloud.dataproc.v1beta2.Cluster.Builder subBuilder = null; + if (cluster_ != null) { + subBuilder = cluster_.toBuilder(); + } + cluster_ = input.readMessage(com.google.cloud.dataproc.v1beta2.Cluster.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(cluster_); + cluster_ = subBuilder.buildPartial(); + } + + break; + } + case 34: { + com.google.protobuf.FieldMask.Builder subBuilder = null; + if (updateMask_ != null) { + subBuilder = updateMask_.toBuilder(); + } + updateMask_ = input.readMessage(com.google.protobuf.FieldMask.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(updateMask_); + updateMask_ = subBuilder.buildPartial(); + } + + break; + } + case 42: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 50: { + com.google.protobuf.Duration.Builder subBuilder = null; + if (gracefulDecommissionTimeout_ != null) { + subBuilder = gracefulDecommissionTimeout_.toBuilder(); + } + gracefulDecommissionTimeout_ = input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(gracefulDecommissionTimeout_); + gracefulDecommissionTimeout_ = subBuilder.buildPartial(); + } + + break; + } + case 58: { + java.lang.String s = input.readStringRequireUtf8(); + + requestId_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.class, com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 5; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 5; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 5; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object clusterName_; + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLUSTER_FIELD_NUMBER = 3; + private com.google.cloud.dataproc.v1beta2.Cluster cluster_; + /** + *
+   * Required. The changes to the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public boolean hasCluster() { + return cluster_ != null; + } + /** + *
+   * Required. The changes to the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.Cluster getCluster() { + return cluster_ == null ? com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance() : cluster_; + } + /** + *
+   * Required. The changes to the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder() { + return getCluster(); + } + + public static final int GRACEFUL_DECOMMISSION_TIMEOUT_FIELD_NUMBER = 6; + private com.google.protobuf.Duration gracefulDecommissionTimeout_; + /** + *
+   * Optional. Timeout for graceful YARN decommissioning. Graceful
+   * decommissioning allows removing nodes from the cluster without
+   * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+   * in progress to finish before forcefully removing nodes (and potentially
+   * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+   * the maximum allowed timeout is 1 day.
+   * Only supported on Dataproc image versions 1.2 and higher.
+   * 
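+   * A minimal illustrative sketch of a one-hour graceful decommission
+   * timeout (the one-hour value is an assumption, chosen within the
+   * one-day maximum):
+   *
+   *     // Illustrative only: 3600 seconds is a placeholder value.
+   *     com.google.protobuf.Duration timeout =
+   *         com.google.protobuf.Duration.newBuilder().setSeconds(3600).build();
+   *     UpdateClusterRequest.Builder builder =
+   *         UpdateClusterRequest.newBuilder().setGracefulDecommissionTimeout(timeout);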
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public boolean hasGracefulDecommissionTimeout() { + return gracefulDecommissionTimeout_ != null; + } + /** + *
+   * Optional. Timeout for graceful YARN decommissioning. Graceful
+   * decommissioning allows removing nodes from the cluster without
+   * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+   * in progress to finish before forcefully removing nodes (and potentially
+   * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+   * the maximum allowed timeout is 1 day.
+   * Only supported on Dataproc image versions 1.2 and higher.
+   * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public com.google.protobuf.Duration getGracefulDecommissionTimeout() { + return gracefulDecommissionTimeout_ == null ? com.google.protobuf.Duration.getDefaultInstance() : gracefulDecommissionTimeout_; + } + /** + *
+   * Optional. Timeout for graceful YARN decommissioning. Graceful
+   * decommissioning allows removing nodes from the cluster without
+   * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+   * in progress to finish before forcefully removing nodes (and potentially
+   * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+   * the maximum allowed timeout is 1 day.
+   * Only supported on Dataproc image versions 1.2 and higher.
+   * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBuilder() { + return getGracefulDecommissionTimeout(); + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 4; + private com.google.protobuf.FieldMask updateMask_; + /** + *
+   * Required. Specifies the path, relative to `Cluster`, of
+   * the field to update. For example, to change the number of workers
+   * in a cluster to 5, the `update_mask` parameter would be
+   * specified as `config.worker_config.num_instances`,
+   * and the `PATCH` request body would specify the new value, as follows:
+   *     {
+   *       "config":{
+   *         "workerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * Similarly, to change the number of preemptible workers in a cluster to 5, the
+   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+   * and the `PATCH` request body would be set as follows:
+   *     {
+   *       "config":{
+   *         "secondaryWorkerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * <strong>Note:</strong> currently only the following fields can be updated:
+   * <table>
+   * <tr>
+   * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+   * </tr>
+   * <tr>
+   * <td>labels</td><td>Updates labels</td>
+   * </tr>
+   * <tr>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * </tr>
+   * </table>
+   * 
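+   * A minimal illustrative sketch resizing the primary worker group to 5
+   * instances (the project, region, and cluster names are placeholders, and
+   * the nested setters are assumed from the
+   * `config.worker_config.num_instances` path in the table above):
+   *
+   *     // Illustrative only: identifiers below are placeholders.
+   *     UpdateClusterRequest request = UpdateClusterRequest.newBuilder()
+   *         .setProjectId("my-project")
+   *         .setRegion("us-central1")
+   *         .setClusterName("my-cluster")
+   *         .setCluster(Cluster.newBuilder()
+   *             .setConfig(ClusterConfig.newBuilder()
+   *                 .setWorkerConfig(InstanceGroupConfig.newBuilder()
+   *                     .setNumInstances(5))))
+   *         .setUpdateMask(com.google.protobuf.FieldMask.newBuilder()
+   *             .addPaths("config.worker_config.num_instances"))
+   *         .build();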
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public boolean hasUpdateMask() { + return updateMask_ != null; + } + /** + *
+   * Required. Specifies the path, relative to `Cluster`, of
+   * the field to update. For example, to change the number of workers
+   * in a cluster to 5, the `update_mask` parameter would be
+   * specified as `config.worker_config.num_instances`,
+   * and the `PATCH` request body would specify the new value, as follows:
+   *     {
+   *       "config":{
+   *         "workerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * Similarly, to change the number of preemptible workers in a cluster to 5, the
+   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+   * and the `PATCH` request body would be set as follows:
+   *     {
+   *       "config":{
+   *         "secondaryWorkerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * <strong>Note:</strong> currently only the following fields can be updated:
+   * <table>
+   * <tr>
+   * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+   * </tr>
+   * <tr>
+   * <td>labels</td><td>Updates labels</td>
+   * </tr>
+   * <tr>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * </tr>
+   * </table>
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + /** + *
+   * Required. Specifies the path, relative to `Cluster`, of
+   * the field to update. For example, to change the number of workers
+   * in a cluster to 5, the `update_mask` parameter would be
+   * specified as `config.worker_config.num_instances`,
+   * and the `PATCH` request body would specify the new value, as follows:
+   *     {
+   *       "config":{
+   *         "workerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * Similarly, to change the number of preemptible workers in a cluster to 5, the
+   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+   * and the `PATCH` request body would be set as follows:
+   *     {
+   *       "config":{
+   *         "secondaryWorkerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * <strong>Note:</strong> currently only the following fields can be updated:
+   * <table>
+   * <tr>
+   * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+   * </tr>
+   * <tr>
+   * <td>labels</td><td>Updates labels</td>
+   * </tr>
+   * <tr>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * </tr>
+   * </table>
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return getUpdateMask(); + } + + public static final int REQUEST_ID_FIELD_NUMBER = 7; + private volatile java.lang.Object requestId_; + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 7; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } + } + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 7; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterName_); + } + if (cluster_ != null) { + output.writeMessage(3, getCluster()); + } + if (updateMask_ != null) { + output.writeMessage(4, getUpdateMask()); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, region_); + } + if (gracefulDecommissionTimeout_ != null) { + output.writeMessage(6, getGracefulDecommissionTimeout()); + } + if (!getRequestIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, requestId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterName_); + } + if (cluster_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getCluster()); + } + if (updateMask_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getUpdateMask()); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, region_); + } + if (gracefulDecommissionTimeout_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, getGracefulDecommissionTimeout()); + } + if (!getRequestIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, requestId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.UpdateClusterRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest other = (com.google.cloud.dataproc.v1beta2.UpdateClusterRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && (hasCluster() == other.hasCluster()); + if (hasCluster()) { + result = result && getCluster() + .equals(other.getCluster()); + } + result = result && (hasGracefulDecommissionTimeout() == other.hasGracefulDecommissionTimeout()); + if 
(hasGracefulDecommissionTimeout()) { + result = result && getGracefulDecommissionTimeout() + .equals(other.getGracefulDecommissionTimeout()); + } + result = result && (hasUpdateMask() == other.hasUpdateMask()); + if (hasUpdateMask()) { + result = result && getUpdateMask() + .equals(other.getUpdateMask()); + } + result = result && getRequestId() + .equals(other.getRequestId()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + if (hasCluster()) { + hash = (37 * hash) + CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + getCluster().hashCode(); + } + if (hasGracefulDecommissionTimeout()) { + hash = (37 * hash) + GRACEFUL_DECOMMISSION_TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + getGracefulDecommissionTimeout().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (37 * hash) + REQUEST_ID_FIELD_NUMBER; + hash = (53 * hash) + getRequestId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to update a cluster.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.UpdateClusterRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.UpdateClusterRequest) + com.google.cloud.dataproc.v1beta2.UpdateClusterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.class, com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + clusterName_ = ""; + + if (clusterBuilder_ == null) { + cluster_ = null; + } else { + cluster_ = null; + clusterBuilder_ = null; + } + if (gracefulDecommissionTimeoutBuilder_ == null) { + gracefulDecommissionTimeout_ = null; + } else { + gracefulDecommissionTimeout_ = null; + gracefulDecommissionTimeoutBuilder_ = null; + } + if (updateMaskBuilder_ == null) { + updateMask_ = null; + } else { + updateMask_ = null; + updateMaskBuilder_ = null; + } + requestId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_UpdateClusterRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateClusterRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateClusterRequest build() { + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateClusterRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest result = new com.google.cloud.dataproc.v1beta2.UpdateClusterRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.clusterName_ = clusterName_; + if (clusterBuilder_ == null) { + result.cluster_ = cluster_; + } else { + result.cluster_ = clusterBuilder_.build(); + } + if (gracefulDecommissionTimeoutBuilder_ == null) { + result.gracefulDecommissionTimeout_ = gracefulDecommissionTimeout_; + } else { + result.gracefulDecommissionTimeout_ = gracefulDecommissionTimeoutBuilder_.build(); + } + if (updateMaskBuilder_ == null) { + result.updateMask_ = updateMask_; + 
} else { + result.updateMask_ = updateMaskBuilder_.build(); + } + result.requestId_ = requestId_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.UpdateClusterRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.UpdateClusterRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.UpdateClusterRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + if (other.hasCluster()) { + mergeCluster(other.getCluster()); + } + if (other.hasGracefulDecommissionTimeout()) { + mergeGracefulDecommissionTimeout(other.getGracefulDecommissionTimeout()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (!other.getRequestId().isEmpty()) { + requestId_ = other.requestId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.UpdateClusterRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.UpdateClusterRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project the
+     * cluster belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 5; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 5; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 5; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 5; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 5; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Required. The cluster name.
+     * 
+ * + * string cluster_name = 2; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.Cluster cluster_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder> clusterBuilder_; + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public boolean hasCluster() { + return clusterBuilder_ != null || cluster_ != null; + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.Cluster getCluster() { + if (clusterBuilder_ == null) { + return cluster_ == null ? com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance() : cluster_; + } else { + return clusterBuilder_.getMessage(); + } + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public Builder setCluster(com.google.cloud.dataproc.v1beta2.Cluster value) { + if (clusterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cluster_ = value; + onChanged(); + } else { + clusterBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public Builder setCluster( + com.google.cloud.dataproc.v1beta2.Cluster.Builder builderForValue) { + if (clusterBuilder_ == null) { + cluster_ = builderForValue.build(); + onChanged(); + } else { + clusterBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public Builder mergeCluster(com.google.cloud.dataproc.v1beta2.Cluster value) { + if (clusterBuilder_ == null) { + if (cluster_ != null) { + cluster_ = + com.google.cloud.dataproc.v1beta2.Cluster.newBuilder(cluster_).mergeFrom(value).buildPartial(); + } else { + cluster_ = value; + } + onChanged(); + } else { + clusterBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public Builder clearCluster() { + if (clusterBuilder_ == null) { + cluster_ = null; + onChanged(); + } else { + cluster_ = null; + clusterBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.Cluster.Builder getClusterBuilder() { + + onChanged(); + return getClusterFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder() { + if (clusterBuilder_ != null) { + return clusterBuilder_.getMessageOrBuilder(); + } else { + return cluster_ == null ? + com.google.cloud.dataproc.v1beta2.Cluster.getDefaultInstance() : cluster_; + } + } + /** + *
+     * Required. The changes to the cluster.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder> + getClusterFieldBuilder() { + if (clusterBuilder_ == null) { + clusterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Cluster, com.google.cloud.dataproc.v1beta2.Cluster.Builder, com.google.cloud.dataproc.v1beta2.ClusterOrBuilder>( + getCluster(), + getParentForChildren(), + isClean()); + cluster_ = null; + } + return clusterBuilder_; + } + + private com.google.protobuf.Duration gracefulDecommissionTimeout_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> gracefulDecommissionTimeoutBuilder_; + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
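+     * As a minimal sketch (illustrative only, not generated code; the
+     * one-hour value and the omission of the other request fields are
+     * assumptions made for brevity), a one-hour graceful decommission
+     * could be requested as:
+     *     UpdateClusterRequest.newBuilder()
+     *         .setGracefulDecommissionTimeout(
+     *             com.google.protobuf.Duration.newBuilder().setSeconds(3600).build())
+     *         .build();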
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public boolean hasGracefulDecommissionTimeout() { + return gracefulDecommissionTimeoutBuilder_ != null || gracefulDecommissionTimeout_ != null; + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public com.google.protobuf.Duration getGracefulDecommissionTimeout() { + if (gracefulDecommissionTimeoutBuilder_ == null) { + return gracefulDecommissionTimeout_ == null ? com.google.protobuf.Duration.getDefaultInstance() : gracefulDecommissionTimeout_; + } else { + return gracefulDecommissionTimeoutBuilder_.getMessage(); + } + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public Builder setGracefulDecommissionTimeout(com.google.protobuf.Duration value) { + if (gracefulDecommissionTimeoutBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + gracefulDecommissionTimeout_ = value; + onChanged(); + } else { + gracefulDecommissionTimeoutBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public Builder setGracefulDecommissionTimeout( + com.google.protobuf.Duration.Builder builderForValue) { + if (gracefulDecommissionTimeoutBuilder_ == null) { + gracefulDecommissionTimeout_ = builderForValue.build(); + onChanged(); + } else { + gracefulDecommissionTimeoutBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public Builder mergeGracefulDecommissionTimeout(com.google.protobuf.Duration value) { + if (gracefulDecommissionTimeoutBuilder_ == null) { + if (gracefulDecommissionTimeout_ != null) { + gracefulDecommissionTimeout_ = + com.google.protobuf.Duration.newBuilder(gracefulDecommissionTimeout_).mergeFrom(value).buildPartial(); + } else { + gracefulDecommissionTimeout_ = value; + } + onChanged(); + } else { + gracefulDecommissionTimeoutBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public Builder clearGracefulDecommissionTimeout() { + if (gracefulDecommissionTimeoutBuilder_ == null) { + gracefulDecommissionTimeout_ = null; + onChanged(); + } else { + gracefulDecommissionTimeout_ = null; + gracefulDecommissionTimeoutBuilder_ = null; + } + + return this; + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public com.google.protobuf.Duration.Builder getGracefulDecommissionTimeoutBuilder() { + + onChanged(); + return getGracefulDecommissionTimeoutFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBuilder() { + if (gracefulDecommissionTimeoutBuilder_ != null) { + return gracefulDecommissionTimeoutBuilder_.getMessageOrBuilder(); + } else { + return gracefulDecommissionTimeout_ == null ? + com.google.protobuf.Duration.getDefaultInstance() : gracefulDecommissionTimeout_; + } + } + /** + *
+     * Optional. Timeout for graceful YARN decommissioning. Graceful
+     * decommissioning allows removing nodes from the cluster without
+     * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+     * in progress to finish before forcefully removing nodes (and potentially
+     * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+     * the maximum allowed timeout is 1 day.
+     * Only supported on Dataproc image versions 1.2 and higher.
+     * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder> + getGracefulDecommissionTimeoutFieldBuilder() { + if (gracefulDecommissionTimeoutBuilder_ == null) { + gracefulDecommissionTimeoutBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, com.google.protobuf.Duration.Builder, com.google.protobuf.DurationOrBuilder>( + getGracefulDecommissionTimeout(), + getParentForChildren(), + isClean()); + gracefulDecommissionTimeout_ = null; + } + return gracefulDecommissionTimeoutBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder> updateMaskBuilder_; + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
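+     * As a sketch, the first resize above could be built in Java roughly as
+     * follows (illustrative only; the required project, region, and cluster
+     * name fields are omitted for brevity):
+     *     UpdateClusterRequest.newBuilder()
+     *         .setCluster(Cluster.newBuilder()
+     *             .setConfig(ClusterConfig.newBuilder()
+     *                 .setWorkerConfig(InstanceGroupConfig.newBuilder()
+     *                     .setNumInstances(5))))
+     *         .setUpdateMask(com.google.protobuf.FieldMask.newBuilder()
+     *             .addPaths("config.worker_config.num_instances"))
+     *         .build();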
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public boolean hasUpdateMask() { + return updateMaskBuilder_ != null || updateMask_ != null; + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + onChanged(); + } else { + updateMaskBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public Builder setUpdateMask( + com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + onChanged(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (updateMask_ != null) { + updateMask_ = + com.google.protobuf.FieldMask.newBuilder(updateMask_).mergeFrom(value).buildPartial(); + } else { + updateMask_ = value; + } + onChanged(); + } else { + updateMaskBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public Builder clearUpdateMask() { + if (updateMaskBuilder_ == null) { + updateMask_ = null; + onChanged(); + } else { + updateMask_ = null; + updateMaskBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + + onChanged(); + return getUpdateMaskFieldBuilder().getBuilder(); + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null ? + com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + } + /** + *
+     * Required. Specifies the path, relative to `Cluster`, of
+     * the field to update. For example, to change the number of workers
+     * in a cluster to 5, the `update_mask` parameter would be
+     * specified as `config.worker_config.num_instances`,
+     * and the `PATCH` request body would specify the new value, as follows:
+     *     {
+     *       "config":{
+     *         "workerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * Similarly, to change the number of preemptible workers in a cluster to 5, the
+     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+     * and the `PATCH` request body would be set as follows:
+     *     {
+     *       "config":{
+     *         "secondaryWorkerConfig":{
+     *           "numInstances":"5"
+     *         }
+     *       }
+     *     }
+     * <strong>Note:</strong> currently only the following fields can be updated:
+     * <table>
+     * <tr>
+     * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+     * </tr>
+     * <tr>
+     * <td>labels</td><td>Updates labels</td>
+     * </tr>
+     * <tr>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * </tr>
+     * <tr>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * </tr>
+     * </table>
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder> + getUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), + getParentForChildren(), + isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private java.lang.Object requestId_ = ""; + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
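+     * A sketch of supplying such an id (illustrative only):
+     *     builder.setRequestId(java.util.UUID.randomUUID().toString());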
+     * 
+ * + * string request_id = 7; + */ + public java.lang.String getRequestId() { + java.lang.Object ref = requestId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 7; + */ + public com.google.protobuf.ByteString + getRequestIdBytes() { + java.lang.Object ref = requestId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + requestId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 7; + */ + public Builder setRequestId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + requestId_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 7; + */ + public Builder clearRequestId() { + + requestId_ = getDefaultInstance().getRequestId(); + onChanged(); + return this; + } + /** + *
+     * Optional. A unique id used to identify the request. If the server
+     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+     * id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+     * backend is returned.
+     * It is recommended to always set this value to a
+     * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). The maximum length is 40 characters.
+     * 
+ * + * string request_id = 7; + */ + public Builder setRequestIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + requestId_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.UpdateClusterRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.UpdateClusterRequest) + private static final com.google.cloud.dataproc.v1beta2.UpdateClusterRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.UpdateClusterRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.UpdateClusterRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateClusterRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateClusterRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateClusterRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java new file mode 100644 index 000000000000..4a7e2d50e7e1 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java @@ -0,0 +1,327 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/clusters.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface UpdateClusterRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.UpdateClusterRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project the
+   * cluster belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 5; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 5; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + java.lang.String getClusterName(); + /** + *
+   * Required. The cluster name.
+   * 
+ * + * string cluster_name = 2; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Required. The changes to the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + boolean hasCluster(); + /** + *
+   * Required. The changes to the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + com.google.cloud.dataproc.v1beta2.Cluster getCluster(); + /** + *
+   * Required. The changes to the cluster.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Cluster cluster = 3; + */ + com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder(); + + /** + *
+   * Optional. Timeout for graceful YARN decommissioning. Graceful
+   * decommissioning allows removing nodes from the cluster without
+   * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+   * in progress to finish before forcefully removing nodes (and potentially
+   * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+   * the maximum allowed timeout is 1 day.
+   * Only supported on Dataproc image versions 1.2 and higher.
+   * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + boolean hasGracefulDecommissionTimeout(); + /** + *
+   * Optional. Timeout for graceful YARN decommissioning. Graceful
+   * decommissioning allows removing nodes from the cluster without
+   * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+   * in progress to finish before forcefully removing nodes (and potentially
+   * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+   * the maximum allowed timeout is 1 day.
+   * Only supported on Dataproc image versions 1.2 and higher.
+   * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + com.google.protobuf.Duration getGracefulDecommissionTimeout(); + /** + *
+   * Optional. Timeout for graceful YARN decommissioning. Graceful
+   * decommissioning allows removing nodes from the cluster without
+   * interrupting jobs in progress. Timeout specifies how long to wait for jobs
+   * in progress to finish before forcefully removing nodes (and potentially
+   * interrupting jobs). Default timeout is 0 (for forceful decommission), and
+   * the maximum allowed timeout is 1 day.
+   * Only supported on Dataproc image versions 1.2 and higher.
+   * 
+ * + * .google.protobuf.Duration graceful_decommission_timeout = 6; + */ + com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBuilder(); + + /** + *
+   * Required. Specifies the path, relative to `Cluster`, of
+   * the field to update. For example, to change the number of workers
+   * in a cluster to 5, the `update_mask` parameter would be
+   * specified as `config.worker_config.num_instances`,
+   * and the `PATCH` request body would specify the new value, as follows:
+   *     {
+   *       "config":{
+   *         "workerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * Similarly, to change the number of preemptible workers in a cluster to 5, the
+   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+   * and the `PATCH` request body would be set as follows:
+   *     {
+   *       "config":{
+   *         "secondaryWorkerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * <strong>Note:</strong> currently only the following fields can be updated:
+   * <table>
+   * <tr>
+   * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+   * </tr>
+   * <tr>
+   * <td>labels</td><td>Updates labels</td>
+   * </tr>
+   * <tr>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * </tr>
+   * </table>
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + boolean hasUpdateMask(); + /** + *
+   * Required. Specifies the path, relative to `Cluster`, of
+   * the field to update. For example, to change the number of workers
+   * in a cluster to 5, the `update_mask` parameter would be
+   * specified as `config.worker_config.num_instances`,
+   * and the `PATCH` request body would specify the new value, as follows:
+   *     {
+   *       "config":{
+   *         "workerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * Similarly, to change the number of preemptible workers in a cluster to 5, the
+   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+   * and the `PATCH` request body would be set as follows:
+   *     {
+   *       "config":{
+   *         "secondaryWorkerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * <strong>Note:</strong> currently only the following fields can be updated:
+   * <table>
+   * <tr>
+   * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+   * </tr>
+   * <tr>
+   * <td>labels</td><td>Updates labels</td>
+   * </tr>
+   * <tr>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * </tr>
+   * </table>
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + com.google.protobuf.FieldMask getUpdateMask(); + /** + *
+   * Required. Specifies the path, relative to `Cluster`, of
+   * the field to update. For example, to change the number of workers
+   * in a cluster to 5, the `update_mask` parameter would be
+   * specified as `config.worker_config.num_instances`,
+   * and the `PATCH` request body would specify the new value, as follows:
+   *     {
+   *       "config":{
+   *         "workerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * Similarly, to change the number of preemptible workers in a cluster to 5, the
+   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
+   * and the `PATCH` request body would be set as follows:
+   *     {
+   *       "config":{
+   *         "secondaryWorkerConfig":{
+   *           "numInstances":"5"
+   *         }
+   *       }
+   *     }
+   * <strong>Note:</strong> currently only the following fields can be updated:
+   * <table>
+   * <tr>
+   * <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+   * </tr>
+   * <tr>
+   * <td>labels</td><td>Updates labels</td>
+   * </tr>
+   * <tr>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * </tr>
+   * <tr>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * </tr>
+   * </table>
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 4; + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 7; + */ + java.lang.String getRequestId(); + /** + *
+   * Optional. A unique id used to identify the request. If the server
+   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
+   * id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
+   * backend is returned.
+   * It is recommended to always set this value to a
+   * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). The maximum length is 40 characters.
+   * 
+ * + * string request_id = 7; + */ + com.google.protobuf.ByteString + getRequestIdBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequest.java new file mode 100644 index 000000000000..7bdb2d1cfb3d --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequest.java @@ -0,0 +1,1412 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to update a job.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.UpdateJobRequest} + */ +public final class UpdateJobRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.UpdateJobRequest) + UpdateJobRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use UpdateJobRequest.newBuilder() to construct. + private UpdateJobRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private UpdateJobRequest() { + projectId_ = ""; + region_ = ""; + jobId_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UpdateJobRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + region_ = s; + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + jobId_ = s; + break; + } + case 34: { + com.google.cloud.dataproc.v1beta2.Job.Builder subBuilder = null; + if (job_ != null) { + subBuilder = job_.toBuilder(); + } + job_ = input.readMessage(com.google.cloud.dataproc.v1beta2.Job.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(job_); + job_ = subBuilder.buildPartial(); + } + + break; + } + case 42: { + com.google.protobuf.FieldMask.Builder subBuilder = null; + if (updateMask_ != null) { + subBuilder = updateMask_.toBuilder(); + } + updateMask_ = input.readMessage(com.google.protobuf.FieldMask.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(updateMask_); + updateMask_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.UpdateJobRequest.class, com.google.cloud.dataproc.v1beta2.UpdateJobRequest.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object projectId_; + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REGION_FIELD_NUMBER = 2; + private volatile java.lang.Object region_; + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } + } + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int JOB_ID_FIELD_NUMBER = 3; + private volatile java.lang.Object jobId_; + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 3; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } + } + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 3; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int JOB_FIELD_NUMBER = 4; + private com.google.cloud.dataproc.v1beta2.Job job_; + /** + *
+   * Required. The changes to the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public boolean hasJob() { + return job_ != null; + } + /** + *
+   * Required. The changes to the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public com.google.cloud.dataproc.v1beta2.Job getJob() { + return job_ == null ? com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance() : job_; + } + /** + *
+   * Required. The changes to the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder() { + return getJob(); + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 5; + private com.google.protobuf.FieldMask updateMask_; + /** + *
+   * Required. Specifies the path, relative to <code>Job</code>, of
+   * the field to update. For example, to update the labels of a Job, the
+   * <code>update_mask</code> parameter would be specified as
+   * <code>labels</code>, and the `PATCH` request body would specify the new
+   * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+   * field that can be updated.
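+   * A sketch of such an update (illustrative only; the label key/value and
+   * the projectId, region, and jobId variables are assumptions):
+   *     UpdateJobRequest.newBuilder()
+   *         .setProjectId(projectId)
+   *         .setRegion(region)
+   *         .setJobId(jobId)
+   *         .setJob(Job.newBuilder().putLabels("env", "prod"))
+   *         .setUpdateMask(com.google.protobuf.FieldMask.newBuilder().addPaths("labels"))
+   *         .build();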
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public boolean hasUpdateMask() { + return updateMask_ != null; + } + /** + *
+   * Required. Specifies the path, relative to <code>Job</code>, of
+   * the field to update. For example, to update the labels of a Job, the
+   * <code>update_mask</code> parameter would be specified as
+   * <code>labels</code>, and the `PATCH` request body would specify the new
+   * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+   * field that can be updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + /** + *
+   * Required. Specifies the path, relative to <code>Job</code>, of
+   * the field to update. For example, to update the labels of a Job, the
+   * <code>update_mask</code> parameter would be specified as
+   * <code>labels</code>, and the `PATCH` request body would specify the new
+   * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+   * field that can be updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return getUpdateMask(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getProjectIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_); + } + if (!getRegionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, region_); + } + if (!getJobIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, jobId_); + } + if (job_ != null) { + output.writeMessage(4, getJob()); + } + if (updateMask_ != null) { + output.writeMessage(5, getUpdateMask()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getProjectIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_); + } + if (!getRegionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, region_); + } + if (!getJobIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, jobId_); + } + if (job_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getJob()); + } + if (updateMask_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, getUpdateMask()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.UpdateJobRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.UpdateJobRequest other = (com.google.cloud.dataproc.v1beta2.UpdateJobRequest) obj; + + boolean result = true; + result = result && getProjectId() + .equals(other.getProjectId()); + result = result && getRegion() + .equals(other.getRegion()); + result = result && getJobId() + .equals(other.getJobId()); + result = result && (hasJob() == other.hasJob()); + if (hasJob()) { + result = result && getJob() + .equals(other.getJob()); + } + result = result && (hasUpdateMask() == other.hasUpdateMask()); + if (hasUpdateMask()) { + result = result && getUpdateMask() + .equals(other.getUpdateMask()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + if (hasJob()) { + hash = (37 * hash) + JOB_FIELD_NUMBER; + hash = (53 * hash) + getJob().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + 
hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.UpdateJobRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to update a job.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.UpdateJobRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.UpdateJobRequest) + com.google.cloud.dataproc.v1beta2.UpdateJobRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.UpdateJobRequest.class, com.google.cloud.dataproc.v1beta2.UpdateJobRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.UpdateJobRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + projectId_ = ""; + + region_ = ""; + + jobId_ = ""; + + if (jobBuilder_ == null) { + job_ = null; + } else { + job_ = null; + jobBuilder_ = null; + } + if (updateMaskBuilder_ == null) { + updateMask_ = null; + } else { + updateMask_ = null; + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_UpdateJobRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateJobRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.UpdateJobRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateJobRequest build() { + com.google.cloud.dataproc.v1beta2.UpdateJobRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateJobRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.UpdateJobRequest result = new com.google.cloud.dataproc.v1beta2.UpdateJobRequest(this); + result.projectId_ = projectId_; + result.region_ = region_; + result.jobId_ = jobId_; + if (jobBuilder_ == null) { + result.job_ = job_; + } else { + result.job_ = jobBuilder_.build(); + } + if (updateMaskBuilder_ == null) { + result.updateMask_ = updateMask_; + } else { + result.updateMask_ = updateMaskBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public 
Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.UpdateJobRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.UpdateJobRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.UpdateJobRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.UpdateJobRequest.getDefaultInstance()) return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + onChanged(); + } + if (!other.getRegion().isEmpty()) { + region_ = other.region_; + onChanged(); + } + if (!other.getJobId().isEmpty()) { + jobId_ = other.jobId_; + onChanged(); + } + if (other.hasJob()) { + mergeJob(other.getJob()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.UpdateJobRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.UpdateJobRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object projectId_ = ""; + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public com.google.protobuf.ByteString + getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + projectId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder clearProjectId() { + + projectId_ = getDefaultInstance().getProjectId(); + onChanged(); + return this; + } + /** + *
+     * Required. The ID of the Google Cloud Platform project that the job
+     * belongs to.
+     * 
+ * + * string project_id = 1; + */ + public Builder setProjectIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + projectId_ = value; + onChanged(); + return this; + } + + private java.lang.Object region_ = ""; + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2; + */ + public java.lang.String getRegion() { + java.lang.Object ref = region_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + region_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2; + */ + public com.google.protobuf.ByteString + getRegionBytes() { + java.lang.Object ref = region_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + region_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2; + */ + public Builder setRegion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + region_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2; + */ + public Builder clearRegion() { + + region_ = getDefaultInstance().getRegion(); + onChanged(); + return this; + } + /** + *
+     * Required. The Cloud Dataproc region in which to handle the request.
+     * 
+ * + * string region = 2; + */ + public Builder setRegionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + region_ = value; + onChanged(); + return this; + } + + private java.lang.Object jobId_ = ""; + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 3; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 3; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 3; + */ + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + jobId_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 3; + */ + public Builder clearJobId() { + + jobId_ = getDefaultInstance().getJobId(); + onChanged(); + return this; + } + /** + *
+     * Required. The job ID.
+     * 
+ * + * string job_id = 3; + */ + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + jobId_ = value; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.Job job_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder> jobBuilder_; + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public boolean hasJob() { + return jobBuilder_ != null || job_ != null; + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public com.google.cloud.dataproc.v1beta2.Job getJob() { + if (jobBuilder_ == null) { + return job_ == null ? com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance() : job_; + } else { + return jobBuilder_.getMessage(); + } + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public Builder setJob(com.google.cloud.dataproc.v1beta2.Job value) { + if (jobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + job_ = value; + onChanged(); + } else { + jobBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public Builder setJob( + com.google.cloud.dataproc.v1beta2.Job.Builder builderForValue) { + if (jobBuilder_ == null) { + job_ = builderForValue.build(); + onChanged(); + } else { + jobBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public Builder mergeJob(com.google.cloud.dataproc.v1beta2.Job value) { + if (jobBuilder_ == null) { + if (job_ != null) { + job_ = + com.google.cloud.dataproc.v1beta2.Job.newBuilder(job_).mergeFrom(value).buildPartial(); + } else { + job_ = value; + } + onChanged(); + } else { + jobBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public Builder clearJob() { + if (jobBuilder_ == null) { + job_ = null; + onChanged(); + } else { + job_ = null; + jobBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public com.google.cloud.dataproc.v1beta2.Job.Builder getJobBuilder() { + + onChanged(); + return getJobFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder() { + if (jobBuilder_ != null) { + return jobBuilder_.getMessageOrBuilder(); + } else { + return job_ == null ? + com.google.cloud.dataproc.v1beta2.Job.getDefaultInstance() : job_; + } + } + /** + *
+     * Required. The changes to the job.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder> + getJobFieldBuilder() { + if (jobBuilder_ == null) { + jobBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.Job, com.google.cloud.dataproc.v1beta2.Job.Builder, com.google.cloud.dataproc.v1beta2.JobOrBuilder>( + getJob(), + getParentForChildren(), + isClean()); + job_ = null; + } + return jobBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder> updateMaskBuilder_; + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public boolean hasUpdateMask() { + return updateMaskBuilder_ != null || updateMask_ != null; + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + onChanged(); + } else { + updateMaskBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public Builder setUpdateMask( + com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + onChanged(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (updateMask_ != null) { + updateMask_ = + com.google.protobuf.FieldMask.newBuilder(updateMask_).mergeFrom(value).buildPartial(); + } else { + updateMask_ = value; + } + onChanged(); + } else { + updateMaskBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public Builder clearUpdateMask() { + if (updateMaskBuilder_ == null) { + updateMask_ = null; + onChanged(); + } else { + updateMask_ = null; + updateMaskBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + + onChanged(); + return getUpdateMaskFieldBuilder().getBuilder(); + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null ? + com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + } + /** + *
+     * Required. Specifies the path, relative to <code>Job</code>, of
+     * the field to update. For example, to update the labels of a Job the
+     * <code>update_mask</code> parameter would be specified as
+     * <code>labels</code>, and the `PATCH` request body would specify the new
+     * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+     * field that can be updated.
+     * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder> + getUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, com.google.protobuf.FieldMask.Builder, com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), + getParentForChildren(), + isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.UpdateJobRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.UpdateJobRequest) + private static final com.google.cloud.dataproc.v1beta2.UpdateJobRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.UpdateJobRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.UpdateJobRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateJobRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateJobRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateJobRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequestOrBuilder.java new file mode 100644 index 000000000000..e77b87f47fc6 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequestOrBuilder.java @@ -0,0 +1,130 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface UpdateJobRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.UpdateJobRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + java.lang.String getProjectId(); + /** + *
+   * Required. The ID of the Google Cloud Platform project that the job
+   * belongs to.
+   * 
+ * + * string project_id = 1; + */ + com.google.protobuf.ByteString + getProjectIdBytes(); + + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2; + */ + java.lang.String getRegion(); + /** + *
+   * Required. The Cloud Dataproc region in which to handle the request.
+   * 
+ * + * string region = 2; + */ + com.google.protobuf.ByteString + getRegionBytes(); + + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 3; + */ + java.lang.String getJobId(); + /** + *
+   * Required. The job ID.
+   * 
+ * + * string job_id = 3; + */ + com.google.protobuf.ByteString + getJobIdBytes(); + + /** + *
+   * Required. The changes to the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + boolean hasJob(); + /** + *
+   * Required. The changes to the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + com.google.cloud.dataproc.v1beta2.Job getJob(); + /** + *
+   * Required. The changes to the job.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.Job job = 4; + */ + com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder(); + + /** + *
+   * Required. Specifies the path, relative to <code>Job</code>, of
+   * the field to update. For example, to update the labels of a Job the
+   * <code>update_mask</code> parameter would be specified as
+   * <code>labels</code>, and the `PATCH` request body would specify the new
+   * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+   * field that can be updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + boolean hasUpdateMask(); + /** + *
+   * Required. Specifies the path, relative to <code>Job</code>, of
+   * the field to update. For example, to update the labels of a Job the
+   * <code>update_mask</code> parameter would be specified as
+   * <code>labels</code>, and the `PATCH` request body would specify the new
+   * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+   * field that can be updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + com.google.protobuf.FieldMask getUpdateMask(); + /** + *
+   * Required. Specifies the path, relative to <code>Job</code>, of
+   * the field to update. For example, to update the labels of a Job the
+   * <code>update_mask</code> parameter would be specified as
+   * <code>labels</code>, and the `PATCH` request body would specify the new
+   * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
+   * field that can be updated.
+   * 
+ * + * .google.protobuf.FieldMask update_mask = 5; + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequest.java new file mode 100644 index 000000000000..7559b1f8a1dd --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequest.java @@ -0,0 +1,663 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A request to update a workflow template.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest} + */ +public final class UpdateWorkflowTemplateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) + UpdateWorkflowTemplateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use UpdateWorkflowTemplateRequest.newBuilder() to construct. + private UpdateWorkflowTemplateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private UpdateWorkflowTemplateRequest() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UpdateWorkflowTemplateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder subBuilder = null; + if (template_ != null) { + subBuilder = template_.toBuilder(); + } + template_ = input.readMessage(com.google.cloud.dataproc.v1beta2.WorkflowTemplate.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(template_); + template_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.Builder.class); + } + + public static final int TEMPLATE_FIELD_NUMBER = 1; + private com.google.cloud.dataproc.v1beta2.WorkflowTemplate template_; + /** + *
+   * Required. The updated workflow template.
+   * The `template.version` field must match the current version.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public boolean hasTemplate() { + return template_ != null; + } + /** + *
+   * Required. The updated workflow template.
+   * The `template.version` field must match the current version.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate() { + return template_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } + /** + *
+   * Required. The updated workflow template.
+   * The `template.version` field must match the current version.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder() { + return getTemplate(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (template_ != null) { + output.writeMessage(1, getTemplate()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (template_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getTemplate()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest other = (com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) obj; + + boolean result = true; + result = result && (hasTemplate() == other.hasTemplate()); + if (hasTemplate()) { + result = result && getTemplate() + .equals(other.getTemplate()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTemplate()) { + hash = (37 * hash) + TEMPLATE_FIELD_NUMBER; + hash = (53 * hash) + getTemplate().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A request to update a workflow template.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.class, com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (templateBuilder_ == null) { + template_ = null; + } else { + template_ = null; + templateBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest build() { + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest buildPartial() { + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest result = new com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest(this); + if (templateBuilder_ == null) { + result.template_ = template_; + } else { + result.template_ = templateBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) 
super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest other) { + if (other == com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.getDefaultInstance()) return this; + if (other.hasTemplate()) { + mergeTemplate(other.getTemplate()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.cloud.dataproc.v1beta2.WorkflowTemplate template_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> templateBuilder_; + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public boolean hasTemplate() { + return templateBuilder_ != null || template_ != null; + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate() { + if (templateBuilder_ == null) { + return template_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } else { + return templateBuilder_.getMessage(); + } + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public Builder setTemplate(com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + template_ = value; + onChanged(); + } else { + templateBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public Builder setTemplate( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder builderForValue) { + if (templateBuilder_ == null) { + template_ = builderForValue.build(); + onChanged(); + } else { + templateBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public Builder mergeTemplate(com.google.cloud.dataproc.v1beta2.WorkflowTemplate value) { + if (templateBuilder_ == null) { + if (template_ != null) { + template_ = + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.newBuilder(template_).mergeFrom(value).buildPartial(); + } else { + template_ = value; + } + onChanged(); + } else { + templateBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public Builder clearTemplate() { + if (templateBuilder_ == null) { + template_ = null; + onChanged(); + } else { + template_ = null; + templateBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder getTemplateBuilder() { + + onChanged(); + return getTemplateFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder() { + if (templateBuilder_ != null) { + return templateBuilder_.getMessageOrBuilder(); + } else { + return template_ == null ? + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance() : template_; + } + } + /** + *
+     * Required. The updated workflow template.
+     * The `template.version` field must match the current version.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder> + getTemplateFieldBuilder() { + if (templateBuilder_ == null) { + templateBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplate, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder>( + getTemplate(), + getParentForChildren(), + isClean()); + template_ = null; + } + return templateBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) + private static final com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest(); + } + + public static com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateWorkflowTemplateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateWorkflowTemplateRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequestOrBuilder.java new file mode 100644 index 000000000000..e2782917c304 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequestOrBuilder.java @@ -0,0 +1,37 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface UpdateWorkflowTemplateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The updated workflow template.
+   * The `template.version` field must match the current version.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + boolean hasTemplate(); + /** + *
+   * Required. The updated workflow template.
+   * The `template.version` field must match the current version.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplate getTemplate(); + /** + *
+   * Required. The updated workflow template.
+   * The `template.version` field must match the current version.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplate template = 1; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplateOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraph.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraph.java new file mode 100644 index 000000000000..4da494a28ea9 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraph.java @@ -0,0 +1,859 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * The workflow graph.
+ * 
+ * </pre>
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowGraph}
+ */
+public final class WorkflowGraph extends
+    com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.WorkflowGraph)
+    WorkflowGraphOrBuilder {
+private static final long serialVersionUID = 0L;
+  // Use WorkflowGraph.newBuilder() to construct.
+  private WorkflowGraph(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private WorkflowGraph() {
+    nodes_ = java.util.Collections.emptyList();
+  }
+
+  @java.lang.Override
+  public final com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return this.unknownFields;
+  }
+  private WorkflowGraph(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    if (extensionRegistry == null) {
+      throw new java.lang.NullPointerException();
+    }
+    int mutable_bitField0_ = 0;
+    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+        com.google.protobuf.UnknownFieldSet.newBuilder();
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            done = true;
+            break;
+          case 10: {
+            if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+              nodes_ = new java.util.ArrayList<com.google.cloud.dataproc.v1beta2.WorkflowNode>();
+              mutable_bitField0_ |= 0x00000001;
+            }
+            nodes_.add(
+                input.readMessage(com.google.cloud.dataproc.v1beta2.WorkflowNode.parser(), extensionRegistry));
+            break;
+          }
+          default: {
+            if (!parseUnknownFieldProto3(
+                input, unknownFields, extensionRegistry, tag)) {
+              done = true;
+            }
+            break;
+          }
+        }
+      }
+    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+        nodes_ = java.util.Collections.unmodifiableList(nodes_);
+      }
+      this.unknownFields = unknownFields.build();
+      makeExtensionsImmutable();
+    }
+  }
+  public static final com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_descriptor;
+  }
+
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.dataproc.v1beta2.WorkflowGraph.class, com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder.class);
+  }
+
+  public static final int NODES_FIELD_NUMBER = 1;
+  private java.util.List<com.google.cloud.dataproc.v1beta2.WorkflowNode> nodes_;
+  /**
+   * <pre>
+   * Output only. The workflow nodes.
+   *
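+   *
+   * Editor's note: the returned list is unmodifiable (see the parsing
+   * constructor above, which wraps nodes_ with Collections.unmodifiableList);
+   * index-based access is also available, e.g.:
+   *   WorkflowNode first = graph.getNodesCount() > 0 ? graph.getNodes(0) : null;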
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public java.util.List getNodesList() { + return nodes_; + } + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public java.util.List + getNodesOrBuilderList() { + return nodes_; + } + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public int getNodesCount() { + return nodes_.size(); + } + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNode getNodes(int index) { + return nodes_.get(index); + } + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNodeOrBuilder getNodesOrBuilder( + int index) { + return nodes_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < nodes_.size(); i++) { + output.writeMessage(1, nodes_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < nodes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, nodes_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.WorkflowGraph)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.WorkflowGraph other = (com.google.cloud.dataproc.v1beta2.WorkflowGraph) obj; + + boolean result = true; + result = result && getNodesList() + .equals(other.getNodesList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getNodesCount() > 0) { + hash = (37 * hash) + NODES_FIELD_NUMBER; + hash = (53 * hash) + getNodesList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.WorkflowGraph prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The workflow graph.
+   *
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowGraph} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.WorkflowGraph) + com.google.cloud.dataproc.v1beta2.WorkflowGraphOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowGraph.class, com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.WorkflowGraph.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getNodesFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (nodesBuilder_ == null) { + nodes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + nodesBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowGraph getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowGraph.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowGraph build() { + com.google.cloud.dataproc.v1beta2.WorkflowGraph result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowGraph buildPartial() { + com.google.cloud.dataproc.v1beta2.WorkflowGraph result = new com.google.cloud.dataproc.v1beta2.WorkflowGraph(this); + int from_bitField0_ = bitField0_; + if (nodesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + nodes_ = java.util.Collections.unmodifiableList(nodes_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.nodes_ = nodes_; + } else { + result.nodes_ = nodesBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + 
} + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.WorkflowGraph) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.WorkflowGraph)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.WorkflowGraph other) { + if (other == com.google.cloud.dataproc.v1beta2.WorkflowGraph.getDefaultInstance()) return this; + if (nodesBuilder_ == null) { + if (!other.nodes_.isEmpty()) { + if (nodes_.isEmpty()) { + nodes_ = other.nodes_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureNodesIsMutable(); + nodes_.addAll(other.nodes_); + } + onChanged(); + } + } else { + if (!other.nodes_.isEmpty()) { + if (nodesBuilder_.isEmpty()) { + nodesBuilder_.dispose(); + nodesBuilder_ = null; + nodes_ = other.nodes_; + bitField0_ = (bitField0_ & ~0x00000001); + nodesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getNodesFieldBuilder() : null; + } else { + nodesBuilder_.addAllMessages(other.nodes_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.WorkflowGraph parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.WorkflowGraph) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List nodes_ = + java.util.Collections.emptyList(); + private void ensureNodesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + nodes_ = new java.util.ArrayList(nodes_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowNode, com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder, com.google.cloud.dataproc.v1beta2.WorkflowNodeOrBuilder> nodesBuilder_; + + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public java.util.List getNodesList() { + if (nodesBuilder_ == null) { + return java.util.Collections.unmodifiableList(nodes_); + } else { + return nodesBuilder_.getMessageList(); + } + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public int getNodesCount() { + if (nodesBuilder_ == null) { + return nodes_.size(); + } else { + return nodesBuilder_.getCount(); + } + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNode getNodes(int index) { + if (nodesBuilder_ == null) { + return nodes_.get(index); + } else { + return nodesBuilder_.getMessage(index); + } + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder setNodes( + int index, com.google.cloud.dataproc.v1beta2.WorkflowNode value) { + if (nodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodesIsMutable(); + nodes_.set(index, value); + onChanged(); + } else { + nodesBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder setNodes( + int index, com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder builderForValue) { + if (nodesBuilder_ == null) { + ensureNodesIsMutable(); + nodes_.set(index, builderForValue.build()); + onChanged(); + } else { + nodesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
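+     *
+     * Builder sketch (editor's illustration; this field is output only, so
+     * client code normally reads rather than builds it; "job-1" is a
+     * hypothetical step id, and a WorkflowNode.Builder setStepId setter is
+     * assumed):
+     *   WorkflowGraph graph = WorkflowGraph.newBuilder()
+     *       .addNodes(WorkflowNode.newBuilder().setStepId("job-1").build())
+     *       .build();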
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder addNodes(com.google.cloud.dataproc.v1beta2.WorkflowNode value) { + if (nodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodesIsMutable(); + nodes_.add(value); + onChanged(); + } else { + nodesBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder addNodes( + int index, com.google.cloud.dataproc.v1beta2.WorkflowNode value) { + if (nodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodesIsMutable(); + nodes_.add(index, value); + onChanged(); + } else { + nodesBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder addNodes( + com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder builderForValue) { + if (nodesBuilder_ == null) { + ensureNodesIsMutable(); + nodes_.add(builderForValue.build()); + onChanged(); + } else { + nodesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder addNodes( + int index, com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder builderForValue) { + if (nodesBuilder_ == null) { + ensureNodesIsMutable(); + nodes_.add(index, builderForValue.build()); + onChanged(); + } else { + nodesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder addAllNodes( + java.lang.Iterable values) { + if (nodesBuilder_ == null) { + ensureNodesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, nodes_); + onChanged(); + } else { + nodesBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder clearNodes() { + if (nodesBuilder_ == null) { + nodes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + nodesBuilder_.clear(); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public Builder removeNodes(int index) { + if (nodesBuilder_ == null) { + ensureNodesIsMutable(); + nodes_.remove(index); + onChanged(); + } else { + nodesBuilder_.remove(index); + } + return this; + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder getNodesBuilder( + int index) { + return getNodesFieldBuilder().getBuilder(index); + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNodeOrBuilder getNodesOrBuilder( + int index) { + if (nodesBuilder_ == null) { + return nodes_.get(index); } else { + return nodesBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public java.util.List + getNodesOrBuilderList() { + if (nodesBuilder_ != null) { + return nodesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(nodes_); + } + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder addNodesBuilder() { + return getNodesFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.WorkflowNode.getDefaultInstance()); + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder addNodesBuilder( + int index) { + return getNodesFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.WorkflowNode.getDefaultInstance()); + } + /** + *
+     * Output only. The workflow nodes.
+     *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + public java.util.List + getNodesBuilderList() { + return getNodesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowNode, com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder, com.google.cloud.dataproc.v1beta2.WorkflowNodeOrBuilder> + getNodesFieldBuilder() { + if (nodesBuilder_ == null) { + nodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowNode, com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder, com.google.cloud.dataproc.v1beta2.WorkflowNodeOrBuilder>( + nodes_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + nodes_ = null; + } + return nodesBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.WorkflowGraph) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowGraph) + private static final com.google.cloud.dataproc.v1beta2.WorkflowGraph DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.WorkflowGraph(); + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowGraph getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WorkflowGraph parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WorkflowGraph(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowGraph getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraphOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraphOrBuilder.java new file mode 100644 index 000000000000..f2d2d928e490 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraphOrBuilder.java @@ -0,0 +1,53 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface WorkflowGraphOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.WorkflowGraph) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + java.util.List + getNodesList(); + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + com.google.cloud.dataproc.v1beta2.WorkflowNode getNodes(int index); + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + int getNodesCount(); + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + java.util.List + getNodesOrBuilderList(); + /** + *
+   * Output only. The workflow nodes.
+   *
+ * + * repeated .google.cloud.dataproc.v1beta2.WorkflowNode nodes = 1; + */ + com.google.cloud.dataproc.v1beta2.WorkflowNodeOrBuilder getNodesOrBuilder( + int index); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadata.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadata.java new file mode 100644 index 000000000000..21dce0c8f8dc --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadata.java @@ -0,0 +1,2084 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A Cloud Dataproc workflow metadata resource, describing a workflow
+ * instantiated from a workflow template.
+ *
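+ *
+ * Usage sketch (editor's illustration; assumes a google.longrunning.Operation
+ * whose metadata field packs a WorkflowMetadata; Any.unpack throws
+ * InvalidProtocolBufferException):
+ *   WorkflowMetadata metadata =
+ *       operation.getMetadata().unpack(WorkflowMetadata.class);
+ *   boolean finished = metadata.getState() == WorkflowMetadata.State.DONE;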
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowMetadata} + */ +public final class WorkflowMetadata extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.WorkflowMetadata) + WorkflowMetadataOrBuilder { +private static final long serialVersionUID = 0L; + // Use WorkflowMetadata.newBuilder() to construct. + private WorkflowMetadata(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private WorkflowMetadata() { + template_ = ""; + version_ = 0; + state_ = 0; + clusterName_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WorkflowMetadata( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + template_ = s; + break; + } + case 16: { + + version_ = input.readInt32(); + break; + } + case 26: { + com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder subBuilder = null; + if (createCluster_ != null) { + subBuilder = createCluster_.toBuilder(); + } + createCluster_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterOperation.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(createCluster_); + createCluster_ = subBuilder.buildPartial(); + } + + break; + } + case 34: { + com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder subBuilder = null; + if (graph_ != null) { + subBuilder = graph_.toBuilder(); + } + graph_ = input.readMessage(com.google.cloud.dataproc.v1beta2.WorkflowGraph.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(graph_); + graph_ = subBuilder.buildPartial(); + } + + break; + } + case 42: { + com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder subBuilder = null; + if (deleteCluster_ != null) { + subBuilder = deleteCluster_.toBuilder(); + } + deleteCluster_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterOperation.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(deleteCluster_); + deleteCluster_ = subBuilder.buildPartial(); + } + + break; + } + case 48: { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 58: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + parameters_ = com.google.protobuf.MapField.newMapField( + ParametersDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000080; + } + com.google.protobuf.MapEntry + parameters__ = input.readMessage( + ParametersDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + parameters_.getMutableMap().put( + parameters__.getKey(), parameters__.getValue()); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) 
{ + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 8: + return internalGetParameters(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowMetadata.class, com.google.cloud.dataproc.v1beta2.WorkflowMetadata.Builder.class); + } + + /** + *
+   * The operation state.
+   *
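+   *
+   * Editor's note: DONE covers both completed and cancelled workflows (see
+   * the DONE value below); callers that need the distinction should inspect
+   * per-node state in the workflow graph rather than this enum alone.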
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.WorkflowMetadata.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * Unused.
+     *
+ * + * UNKNOWN = 0; + */ + UNKNOWN(0), + /** + *
+     * The operation has been created.
+     *
+ * + * PENDING = 1; + */ + PENDING(1), + /** + *
+     * The operation is running.
+     *
+ * + * RUNNING = 2; + */ + RUNNING(2), + /** + *
+     * The operation is done; either cancelled or completed.
+     *
+ * + * DONE = 3; + */ + DONE(3), + UNRECOGNIZED(-1), + ; + + /** + *
+     * Unused.
+     *
+ * + * UNKNOWN = 0; + */ + public static final int UNKNOWN_VALUE = 0; + /** + *
+     * The operation has been created.
+     *
+ * + * PENDING = 1; + */ + public static final int PENDING_VALUE = 1; + /** + *
+     * The operation is running.
+     *
+ * + * RUNNING = 2; + */ + public static final int RUNNING_VALUE = 2; + /** + *
+     * The operation is done; either cancelled or completed.
+     *
+ * + * DONE = 3; + */ + public static final int DONE_VALUE = 3; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + public static State forNumber(int value) { + switch (value) { + case 0: return UNKNOWN; + case 1: return PENDING; + case 2: return RUNNING; + case 3: return DONE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + State> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowMetadata.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.WorkflowMetadata.State) + } + + private int bitField0_; + public static final int TEMPLATE_FIELD_NUMBER = 1; + private volatile java.lang.Object template_; + /** + *
+   * Output only. The "resource name" of the template.
+   *
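+   *
+   * Implementation note (editor's): the backing field holds either a String
+   * or a ByteString; this accessor decodes the UTF-8 bytes at most once and
+   * caches the resulting String, so repeated calls avoid re-decoding.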
+ * + * string template = 1; + */ + public java.lang.String getTemplate() { + java.lang.Object ref = template_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + template_ = s; + return s; + } + } + /** + *
+   * Output only. The "resource name" of the template.
+   *
+ * + * string template = 1; + */ + public com.google.protobuf.ByteString + getTemplateBytes() { + java.lang.Object ref = template_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + template_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSION_FIELD_NUMBER = 2; + private int version_; + /** + *
+   * Output only. The version of the template at the time of
+   * workflow instantiation.
+   *
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + + public static final int CREATE_CLUSTER_FIELD_NUMBER = 3; + private com.google.cloud.dataproc.v1beta2.ClusterOperation createCluster_; + /** + *
+   * Output only. The create cluster operation metadata.
+   *
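+   *
+   * Editor's sketch (assumes ClusterOperation exposes a getError() accessor
+   * that returns an empty string on success):
+   *   if (metadata.hasCreateCluster()
+   *       && !metadata.getCreateCluster().getError().isEmpty()) {
+   *     // cluster creation failed; inspect the error message
+   *   }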
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public boolean hasCreateCluster() { + return createCluster_ != null; + } + /** + *
+   * Output only. The create cluster operation metadata.
+   *
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperation getCreateCluster() { + return createCluster_ == null ? com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance() : createCluster_; + } + /** + *
+   * Output only. The create cluster operation metadata.
+   *
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder getCreateClusterOrBuilder() { + return getCreateCluster(); + } + + public static final int GRAPH_FIELD_NUMBER = 4; + private com.google.cloud.dataproc.v1beta2.WorkflowGraph graph_; + /** + *
+   * Output only. The workflow graph.
+   *
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public boolean hasGraph() { + return graph_ != null; + } + /** + *
+   * Output only. The workflow graph.
+   *
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowGraph getGraph() { + return graph_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowGraph.getDefaultInstance() : graph_; + } + /** + *
+   * Output only. The workflow graph.
+   *
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowGraphOrBuilder getGraphOrBuilder() { + return getGraph(); + } + + public static final int DELETE_CLUSTER_FIELD_NUMBER = 5; + private com.google.cloud.dataproc.v1beta2.ClusterOperation deleteCluster_; + /** + *
+   * Output only. The delete cluster operation metadata.
+   *
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public boolean hasDeleteCluster() { + return deleteCluster_ != null; + } + /** + *
+   * Output only. The delete cluster operation metadata.
+   *
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperation getDeleteCluster() { + return deleteCluster_ == null ? com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance() : deleteCluster_; + } + /** + *
+   * Output only. The delete cluster operation metadata.
+   *
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder getDeleteClusterOrBuilder() { + return getDeleteCluster(); + } + + public static final int STATE_FIELD_NUMBER = 6; + private int state_; + /** + *
+   * Output only. The workflow state.
+   *
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + public int getStateValue() { + return state_; + } + /** + *
+   * Output only. The workflow state.
+   *
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State result = com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State.UNRECOGNIZED : result; + } + + public static final int CLUSTER_NAME_FIELD_NUMBER = 7; + private volatile java.lang.Object clusterName_; + /** + *
+   * Output only. The name of the managed cluster.
+   *
+ * + * string cluster_name = 7; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } + } + /** + *
+   * Output only. The name of the managed cluster.
+   *
+ * + * string cluster_name = 7; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMETERS_FIELD_NUMBER = 8; + private static final class ParametersDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_ParametersEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> parameters_; + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   *
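+   *
+   * Editor's sketch ("ZONE" and its default are hypothetical values; the
+   * accessors used are the map accessors defined on this class):
+   *   String zone = metadata.getParametersOrDefault("ZONE", "us-central1-a");
+   *   if (metadata.containsParameters("ZONE")) {
+   *     zone = metadata.getParametersOrThrow("ZONE");
+   *   }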
+ * + * map<string, string> parameters = 8; + */ + + public boolean containsParameters( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetParameters().getMap().containsKey(key); + } + /** + * Use {@link #getParametersMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   *
+ * + * map<string, string> parameters = 8; + */ + + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   *
+ * + * map<string, string> parameters = 8; + */ + + public java.lang.String getParametersOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   *
+ * + * map<string, string> parameters = 8; + */ + + public java.lang.String getParametersOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getTemplateBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, template_); + } + if (version_ != 0) { + output.writeInt32(2, version_); + } + if (createCluster_ != null) { + output.writeMessage(3, getCreateCluster()); + } + if (graph_ != null) { + output.writeMessage(4, getGraph()); + } + if (deleteCluster_ != null) { + output.writeMessage(5, getDeleteCluster()); + } + if (state_ != com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State.UNKNOWN.getNumber()) { + output.writeEnum(6, state_); + } + if (!getClusterNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, clusterName_); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetParameters(), + ParametersDefaultEntryHolder.defaultEntry, + 8); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getTemplateBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, template_); + } + if (version_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, version_); + } + if (createCluster_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getCreateCluster()); + } + if (graph_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getGraph()); + } + if (deleteCluster_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, getDeleteCluster()); + } + if (state_ != com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State.UNKNOWN.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(6, state_); + } + if (!getClusterNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, clusterName_); + } + for (java.util.Map.Entry entry + : internalGetParameters().getMap().entrySet()) { + com.google.protobuf.MapEntry + parameters__ = ParametersDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, parameters__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.WorkflowMetadata)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.WorkflowMetadata other = (com.google.cloud.dataproc.v1beta2.WorkflowMetadata) obj; + + boolean result = true; + result = result && getTemplate() + 
.equals(other.getTemplate()); + result = result && (getVersion() + == other.getVersion()); + result = result && (hasCreateCluster() == other.hasCreateCluster()); + if (hasCreateCluster()) { + result = result && getCreateCluster() + .equals(other.getCreateCluster()); + } + result = result && (hasGraph() == other.hasGraph()); + if (hasGraph()) { + result = result && getGraph() + .equals(other.getGraph()); + } + result = result && (hasDeleteCluster() == other.hasDeleteCluster()); + if (hasDeleteCluster()) { + result = result && getDeleteCluster() + .equals(other.getDeleteCluster()); + } + result = result && state_ == other.state_; + result = result && getClusterName() + .equals(other.getClusterName()); + result = result && internalGetParameters().equals( + other.internalGetParameters()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TEMPLATE_FIELD_NUMBER; + hash = (53 * hash) + getTemplate().hashCode(); + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + if (hasCreateCluster()) { + hash = (37 * hash) + CREATE_CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + getCreateCluster().hashCode(); + } + if (hasGraph()) { + hash = (37 * hash) + GRAPH_FIELD_NUMBER; + hash = (53 * hash) + getGraph().hashCode(); + } + if (hasDeleteCluster()) { + hash = (37 * hash) + DELETE_CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + getDeleteCluster().hashCode(); + } + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + CLUSTER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClusterName().hashCode(); + if (!internalGetParameters().getMap().isEmpty()) { + hash = (37 * hash) + PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + internalGetParameters().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.WorkflowMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc workflow metadata resource, describing a workflow
+   * instantiated from a workflow template.
+   *
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowMetadata} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.WorkflowMetadata) + com.google.cloud.dataproc.v1beta2.WorkflowMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 8: + return internalGetParameters(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 8: + return internalGetMutableParameters(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowMetadata.class, com.google.cloud.dataproc.v1beta2.WorkflowMetadata.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.WorkflowMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + template_ = ""; + + version_ = 0; + + if (createClusterBuilder_ == null) { + createCluster_ = null; + } else { + createCluster_ = null; + createClusterBuilder_ = null; + } + if (graphBuilder_ == null) { + graph_ = null; + } else { + graph_ = null; + graphBuilder_ = null; + } + if (deleteClusterBuilder_ == null) { + deleteCluster_ = null; + } else { + deleteCluster_ = null; + deleteClusterBuilder_ = null; + } + state_ = 0; + + clusterName_ = ""; + + internalGetMutableParameters().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowMetadata getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowMetadata build() { + com.google.cloud.dataproc.v1beta2.WorkflowMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowMetadata buildPartial() { + com.google.cloud.dataproc.v1beta2.WorkflowMetadata result = new com.google.cloud.dataproc.v1beta2.WorkflowMetadata(this); + int from_bitField0_ = 
bitField0_; + int to_bitField0_ = 0; + result.template_ = template_; + result.version_ = version_; + if (createClusterBuilder_ == null) { + result.createCluster_ = createCluster_; + } else { + result.createCluster_ = createClusterBuilder_.build(); + } + if (graphBuilder_ == null) { + result.graph_ = graph_; + } else { + result.graph_ = graphBuilder_.build(); + } + if (deleteClusterBuilder_ == null) { + result.deleteCluster_ = deleteCluster_; + } else { + result.deleteCluster_ = deleteClusterBuilder_.build(); + } + result.state_ = state_; + result.clusterName_ = clusterName_; + result.parameters_ = internalGetParameters(); + result.parameters_.makeImmutable(); + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.WorkflowMetadata) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.WorkflowMetadata)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.WorkflowMetadata other) { + if (other == com.google.cloud.dataproc.v1beta2.WorkflowMetadata.getDefaultInstance()) return this; + if (!other.getTemplate().isEmpty()) { + template_ = other.template_; + onChanged(); + } + if (other.getVersion() != 0) { + setVersion(other.getVersion()); + } + if (other.hasCreateCluster()) { + mergeCreateCluster(other.getCreateCluster()); + } + if (other.hasGraph()) { + mergeGraph(other.getGraph()); + } + if (other.hasDeleteCluster()) { + mergeDeleteCluster(other.getDeleteCluster()); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.getClusterName().isEmpty()) { + clusterName_ = other.clusterName_; + onChanged(); + } + internalGetMutableParameters().mergeFrom( + other.internalGetParameters()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.WorkflowMetadata parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.WorkflowMetadata) 
e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object template_ = ""; + /** + *
+     * Output only. The "resource name" of the template.
+     * 
+ * + * string template = 1; + */ + public java.lang.String getTemplate() { + java.lang.Object ref = template_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + template_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The "resource name" of the template.
+     * 
+ * + * string template = 1; + */ + public com.google.protobuf.ByteString + getTemplateBytes() { + java.lang.Object ref = template_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + template_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The "resource name" of the template.
+     * 
+ * + * string template = 1; + */ + public Builder setTemplate( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + template_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The "resource name" of the template.
+     * 
+ * + * string template = 1; + */ + public Builder clearTemplate() { + + template_ = getDefaultInstance().getTemplate(); + onChanged(); + return this; + } + /** + *
+     * Output only. The "resource name" of the template.
+     * 
+ * + * string template = 1; + */ + public Builder setTemplateBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + template_ = value; + onChanged(); + return this; + } + + private int version_ ; + /** + *
+     * Output only. The version of the template at the time of
+     * workflow instantiation.
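+     * For example, a workflow started from version 2 of a template keeps
+     * getVersion() == 2 here even if the template is updated afterwards.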
+     * 
+ * + * int32 version = 2; + */ + public int getVersion() { + return version_; + } + /** + *
+     * Output only. The version of the template at the time of
+     * workflow instantiation.
+     * 
+ * + * int32 version = 2; + */ + public Builder setVersion(int value) { + + version_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The version of the template at the time of
+     * workflow instantiation.
+     * 
+ * + * int32 version = 2; + */ + public Builder clearVersion() { + + version_ = 0; + onChanged(); + return this; + } + + private com.google.cloud.dataproc.v1beta2.ClusterOperation createCluster_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperation, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder> createClusterBuilder_; + /** + *
+     * Output only. The create cluster operation metadata.
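+     * A minimal presence sketch; proto3 message fields fall back to the
+     * default instance when unset, so check has* before reading:
+     *   if (metadata.hasCreateCluster()) {
+     *     ClusterOperation op = metadata.getCreateCluster();
+     *   }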
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public boolean hasCreateCluster() { + return createClusterBuilder_ != null || createCluster_ != null; + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperation getCreateCluster() { + if (createClusterBuilder_ == null) { + return createCluster_ == null ? com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance() : createCluster_; + } else { + return createClusterBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public Builder setCreateCluster(com.google.cloud.dataproc.v1beta2.ClusterOperation value) { + if (createClusterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createCluster_ = value; + onChanged(); + } else { + createClusterBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public Builder setCreateCluster( + com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder builderForValue) { + if (createClusterBuilder_ == null) { + createCluster_ = builderForValue.build(); + onChanged(); + } else { + createClusterBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public Builder mergeCreateCluster(com.google.cloud.dataproc.v1beta2.ClusterOperation value) { + if (createClusterBuilder_ == null) { + if (createCluster_ != null) { + createCluster_ = + com.google.cloud.dataproc.v1beta2.ClusterOperation.newBuilder(createCluster_).mergeFrom(value).buildPartial(); + } else { + createCluster_ = value; + } + onChanged(); + } else { + createClusterBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public Builder clearCreateCluster() { + if (createClusterBuilder_ == null) { + createCluster_ = null; + onChanged(); + } else { + createCluster_ = null; + createClusterBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder getCreateClusterBuilder() { + + onChanged(); + return getCreateClusterFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder getCreateClusterOrBuilder() { + if (createClusterBuilder_ != null) { + return createClusterBuilder_.getMessageOrBuilder(); + } else { + return createCluster_ == null ? + com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance() : createCluster_; + } + } + /** + *
+     * Output only. The create cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperation, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder> + getCreateClusterFieldBuilder() { + if (createClusterBuilder_ == null) { + createClusterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperation, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder>( + getCreateCluster(), + getParentForChildren(), + isClean()); + createCluster_ = null; + } + return createClusterBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.WorkflowGraph graph_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowGraph, com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder, com.google.cloud.dataproc.v1beta2.WorkflowGraphOrBuilder> graphBuilder_; + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public boolean hasGraph() { + return graphBuilder_ != null || graph_ != null; + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowGraph getGraph() { + if (graphBuilder_ == null) { + return graph_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowGraph.getDefaultInstance() : graph_; + } else { + return graphBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public Builder setGraph(com.google.cloud.dataproc.v1beta2.WorkflowGraph value) { + if (graphBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + graph_ = value; + onChanged(); + } else { + graphBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public Builder setGraph( + com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder builderForValue) { + if (graphBuilder_ == null) { + graph_ = builderForValue.build(); + onChanged(); + } else { + graphBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public Builder mergeGraph(com.google.cloud.dataproc.v1beta2.WorkflowGraph value) { + if (graphBuilder_ == null) { + if (graph_ != null) { + graph_ = + com.google.cloud.dataproc.v1beta2.WorkflowGraph.newBuilder(graph_).mergeFrom(value).buildPartial(); + } else { + graph_ = value; + } + onChanged(); + } else { + graphBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public Builder clearGraph() { + if (graphBuilder_ == null) { + graph_ = null; + onChanged(); + } else { + graph_ = null; + graphBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder getGraphBuilder() { + + onChanged(); + return getGraphFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowGraphOrBuilder getGraphOrBuilder() { + if (graphBuilder_ != null) { + return graphBuilder_.getMessageOrBuilder(); + } else { + return graph_ == null ? + com.google.cloud.dataproc.v1beta2.WorkflowGraph.getDefaultInstance() : graph_; + } + } + /** + *
+     * Output only. The workflow graph.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowGraph, com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder, com.google.cloud.dataproc.v1beta2.WorkflowGraphOrBuilder> + getGraphFieldBuilder() { + if (graphBuilder_ == null) { + graphBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowGraph, com.google.cloud.dataproc.v1beta2.WorkflowGraph.Builder, com.google.cloud.dataproc.v1beta2.WorkflowGraphOrBuilder>( + getGraph(), + getParentForChildren(), + isClean()); + graph_ = null; + } + return graphBuilder_; + } + + private com.google.cloud.dataproc.v1beta2.ClusterOperation deleteCluster_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperation, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder> deleteClusterBuilder_; + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public boolean hasDeleteCluster() { + return deleteClusterBuilder_ != null || deleteCluster_ != null; + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperation getDeleteCluster() { + if (deleteClusterBuilder_ == null) { + return deleteCluster_ == null ? com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance() : deleteCluster_; + } else { + return deleteClusterBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public Builder setDeleteCluster(com.google.cloud.dataproc.v1beta2.ClusterOperation value) { + if (deleteClusterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + deleteCluster_ = value; + onChanged(); + } else { + deleteClusterBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public Builder setDeleteCluster( + com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder builderForValue) { + if (deleteClusterBuilder_ == null) { + deleteCluster_ = builderForValue.build(); + onChanged(); + } else { + deleteClusterBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public Builder mergeDeleteCluster(com.google.cloud.dataproc.v1beta2.ClusterOperation value) { + if (deleteClusterBuilder_ == null) { + if (deleteCluster_ != null) { + deleteCluster_ = + com.google.cloud.dataproc.v1beta2.ClusterOperation.newBuilder(deleteCluster_).mergeFrom(value).buildPartial(); + } else { + deleteCluster_ = value; + } + onChanged(); + } else { + deleteClusterBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public Builder clearDeleteCluster() { + if (deleteClusterBuilder_ == null) { + deleteCluster_ = null; + onChanged(); + } else { + deleteCluster_ = null; + deleteClusterBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder getDeleteClusterBuilder() { + + onChanged(); + return getDeleteClusterFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + public com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder getDeleteClusterOrBuilder() { + if (deleteClusterBuilder_ != null) { + return deleteClusterBuilder_.getMessageOrBuilder(); + } else { + return deleteCluster_ == null ? + com.google.cloud.dataproc.v1beta2.ClusterOperation.getDefaultInstance() : deleteCluster_; + } + } + /** + *
+     * Output only. The delete cluster operation metadata.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperation, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder> + getDeleteClusterFieldBuilder() { + if (deleteClusterBuilder_ == null) { + deleteClusterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterOperation, com.google.cloud.dataproc.v1beta2.ClusterOperation.Builder, com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder>( + getDeleteCluster(), + getParentForChildren(), + isClean()); + deleteCluster_ = null; + } + return deleteClusterBuilder_; + } + + private int state_ = 0; + /** + *
+     * Output only. The workflow state.
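+     * A minimal read sketch (both accessors exist on the message and on
+     * this builder):
+     *   WorkflowMetadata.State s = metadata.getState();
+     *   int raw = (s == WorkflowMetadata.State.UNRECOGNIZED)
+     *       ? metadata.getStateValue()
+     *       : s.getNumber();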
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + public int getStateValue() { + return state_; + } + /** + *
+     * Output only. The workflow state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + public Builder setStateValue(int value) { + state_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The workflow state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State result = com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State.UNRECOGNIZED : result; + } + /** + *
+     * Output only. The workflow state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + public Builder setState(com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State value) { + if (value == null) { + throw new NullPointerException(); + } + + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Output only. The workflow state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + public Builder clearState() { + + state_ = 0; + onChanged(); + return this; + } + + private java.lang.Object clusterName_ = ""; + /** + *
+     * Output only. The name of the managed cluster.
+     * 
+ * + * string cluster_name = 7; + */ + public java.lang.String getClusterName() { + java.lang.Object ref = clusterName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clusterName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The name of the managed cluster.
+     * 
+ * + * string cluster_name = 7; + */ + public com.google.protobuf.ByteString + getClusterNameBytes() { + java.lang.Object ref = clusterName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + clusterName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The name of the managed cluster.
+     * 
+ * + * string cluster_name = 7; + */ + public Builder setClusterName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + clusterName_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the managed cluster.
+     * 
+ * + * string cluster_name = 7; + */ + public Builder clearClusterName() { + + clusterName_ = getDefaultInstance().getClusterName(); + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the managed cluster.
+     * 
+ * + * string cluster_name = 7; + */ + public Builder setClusterNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + clusterName_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> parameters_; + private com.google.protobuf.MapField + internalGetParameters() { + if (parameters_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + return parameters_; + } + private com.google.protobuf.MapField + internalGetMutableParameters() { + onChanged();; + if (parameters_ == null) { + parameters_ = com.google.protobuf.MapField.newMapField( + ParametersDefaultEntryHolder.defaultEntry); + } + if (!parameters_.isMutable()) { + parameters_ = parameters_.copy(); + } + return parameters_; + } + + public int getParametersCount() { + return internalGetParameters().getMap().size(); + } + /** + *
+     * Map from parameter names to values that were used for those parameters.
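+     * A minimal lookup sketch (the key and fallback value are hypothetical):
+     *   String zone = metadata.getParametersOrDefault("ZONE", "us-central1-a");
+     *   boolean present = metadata.containsParameters("ZONE");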
+     * 
+ * + * map<string, string> parameters = 8; + */ + + public boolean containsParameters( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetParameters().getMap().containsKey(key); + } + /** + * Use {@link #getParametersMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getParameters() { + return getParametersMap(); + } + /** + *
+     * Map from parameter names to values that were used for those parameters.
+     * 
+ * + * map<string, string> parameters = 8; + */ + + public java.util.Map getParametersMap() { + return internalGetParameters().getMap(); + } + /** + *
+     * Map from parameter names to values that were used for those parameters.
+     * 
+ * + * map<string, string> parameters = 8; + */ + + public java.lang.String getParametersOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetParameters().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Map from parameter names to values that were used for those parameters.
+     * 
+ * + * map<string, string> parameters = 8; + */ + + public java.lang.String getParametersOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetParameters().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearParameters() { + internalGetMutableParameters().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Map from parameter names to values that were used for those parameters.
+     * 
+ * + * map<string, string> parameters = 8; + */ + + public Builder removeParameters( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableParameters().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableParameters() { + return internalGetMutableParameters().getMutableMap(); + } + /** + *
+     * Map from parameter names to values that were used for those parameters.
+     * 
+ * + * map<string, string> parameters = 8; + */ + public Builder putParameters( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableParameters().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Map from parameter names to values that were used for those parameters.
+     * 
+ * + * map<string, string> parameters = 8; + */ + + public Builder putAllParameters( + java.util.Map values) { + internalGetMutableParameters().getMutableMap() + .putAll(values); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.WorkflowMetadata) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowMetadata) + private static final com.google.cloud.dataproc.v1beta2.WorkflowMetadata DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.WorkflowMetadata(); + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WorkflowMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WorkflowMetadata(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadataOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadataOrBuilder.java new file mode 100644 index 000000000000..634ee85eec8c --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadataOrBuilder.java @@ -0,0 +1,201 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface WorkflowMetadataOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.WorkflowMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The "resource name" of the template.
+   * 
+ * + * string template = 1; + */ + java.lang.String getTemplate(); + /** + *
+   * Output only. The "resource name" of the template.
+   * 
+ * + * string template = 1; + */ + com.google.protobuf.ByteString + getTemplateBytes(); + + /** + *
+   * Output only. The version of the template at the time of
+   * workflow instantiation.
+   * 
+ * + * int32 version = 2; + */ + int getVersion(); + + /** + *
+   * Output only. The create cluster operation metadata.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + boolean hasCreateCluster(); + /** + *
+   * Output only. The create cluster operation metadata.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperation getCreateCluster(); + /** + *
+   * Output only. The create cluster operation metadata.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation create_cluster = 3; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder getCreateClusterOrBuilder(); + + /** + *
+   * Output only. The workflow graph.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + boolean hasGraph(); + /** + *
+   * Output only. The workflow graph.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + com.google.cloud.dataproc.v1beta2.WorkflowGraph getGraph(); + /** + *
+   * Output only. The workflow graph.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowGraph graph = 4; + */ + com.google.cloud.dataproc.v1beta2.WorkflowGraphOrBuilder getGraphOrBuilder(); + + /** + *
+   * Output only. The delete cluster operation metadata.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + boolean hasDeleteCluster(); + /** + *
+   * Output only. The delete cluster operation metadata.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperation getDeleteCluster(); + /** + *
+   * Output only. The delete cluster operation metadata.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterOperation delete_cluster = 5; + */ + com.google.cloud.dataproc.v1beta2.ClusterOperationOrBuilder getDeleteClusterOrBuilder(); + + /** + *
+   * Output only. The workflow state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + int getStateValue(); + /** + *
+   * Output only. The workflow state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowMetadata.State state = 6; + */ + com.google.cloud.dataproc.v1beta2.WorkflowMetadata.State getState(); + + /** + *
+   * Output only. The name of the managed cluster.
+   * 
+ * + * string cluster_name = 7; + */ + java.lang.String getClusterName(); + /** + *
+   * Output only. The name of the managed cluster.
+   * 
+ * + * string cluster_name = 7; + */ + com.google.protobuf.ByteString + getClusterNameBytes(); + + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   * 
+ * + * map<string, string> parameters = 8; + */ + int getParametersCount(); + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   * 
+ * + * map<string, string> parameters = 8; + */ + boolean containsParameters( + java.lang.String key); + /** + * Use {@link #getParametersMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getParameters(); + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   * 
+ * + * map<string, string> parameters = 8; + */ + java.util.Map + getParametersMap(); + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   * 
+ * + * map<string, string> parameters = 8; + */ + + java.lang.String getParametersOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Map from parameter names to values that were used for those parameters.
+   * 
+ * + * map<string, string> parameters = 8; + */ + + java.lang.String getParametersOrThrow( + java.lang.String key); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNode.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNode.java new file mode 100644 index 000000000000..76f42417bd12 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNode.java @@ -0,0 +1,1415 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * The workflow node.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowNode} + */ +public final class WorkflowNode extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.WorkflowNode) + WorkflowNodeOrBuilder { +private static final long serialVersionUID = 0L; + // Use WorkflowNode.newBuilder() to construct. + private WorkflowNode(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private WorkflowNode() { + stepId_ = ""; + prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + jobId_ = ""; + state_ = 0; + error_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WorkflowNode( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + stepId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + prerequisiteStepIds_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + prerequisiteStepIds_.add(s); + break; + } + case 26: { + java.lang.String s = input.readStringRequireUtf8(); + + jobId_ = s; + break; + } + case 40: { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + + error_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + prerequisiteStepIds_ = prerequisiteStepIds_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowNode.class, com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder.class); + } + + /** + *
+   * The workflow node state.
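+   * Illustrative terminal-state check (values are from this enum):
+   *   NodeState s = node.getState();
+   *   boolean finished = s == NodeState.COMPLETED || s == NodeState.FAILED;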
+   * 
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.WorkflowNode.NodeState} + */ + public enum NodeState + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * State is unspecified.
+     * 
+ * + * NODE_STATUS_UNSPECIFIED = 0; + */ + NODE_STATUS_UNSPECIFIED(0), + /** + *
+     * The node is awaiting its prerequisite nodes to finish.
+     * 
+ * + * BLOCKED = 1; + */ + BLOCKED(1), + /** + *
+     * The node is runnable but not running.
+     * 
+ * + * RUNNABLE = 2; + */ + RUNNABLE(2), + /** + *
+     * The node is running.
+     * 
+ * + * RUNNING = 3; + */ + RUNNING(3), + /** + *
+     * The node completed successfully.
+     * 
+ * + * COMPLETED = 4; + */ + COMPLETED(4), + /** + *
+     * The node failed. A node can be marked FAILED because
+     * its ancestor or peer failed.
+     * 
+ * + * FAILED = 5; + */ + FAILED(5), + UNRECOGNIZED(-1), + ; + + /** + *
+     * State is unspecified.
+     * 
+ * + * NODE_STATUS_UNSPECIFIED = 0; + */ + public static final int NODE_STATUS_UNSPECIFIED_VALUE = 0; + /** + *
+     * The node is awaiting its prerequisite nodes to finish.
+     * 
+ * + * BLOCKED = 1; + */ + public static final int BLOCKED_VALUE = 1; + /** + *
+     * The node is runnable but not running.
+     * 
+ * + * RUNNABLE = 2; + */ + public static final int RUNNABLE_VALUE = 2; + /** + *
+     * The node is running.
+     * 
+ * + * RUNNING = 3; + */ + public static final int RUNNING_VALUE = 3; + /** + *
+     * The node completed successfully.
+     * 
+ * + * COMPLETED = 4; + */ + public static final int COMPLETED_VALUE = 4; + /** + *
+     * The node failed. A node can be marked FAILED because
+     * its ancestor or peer failed.
+     * 
+ * + * FAILED = 5; + */ + public static final int FAILED_VALUE = 5; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static NodeState valueOf(int value) { + return forNumber(value); + } + + public static NodeState forNumber(int value) { + switch (value) { + case 0: return NODE_STATUS_UNSPECIFIED; + case 1: return BLOCKED; + case 2: return RUNNABLE; + case 3: return RUNNING; + case 4: return COMPLETED; + case 5: return FAILED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + NodeState> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public NodeState findValueByNumber(int number) { + return NodeState.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowNode.getDescriptor().getEnumTypes().get(0); + } + + private static final NodeState[] VALUES = values(); + + public static NodeState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private NodeState(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.WorkflowNode.NodeState) + } + + private int bitField0_; + public static final int STEP_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object stepId_; + /** + *
+   * Output only. The name of the node.
+   * 
+ * + * string step_id = 1; + */ + public java.lang.String getStepId() { + java.lang.Object ref = stepId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stepId_ = s; + return s; + } + } + /** + *
+   * Output only. The name of the node.
+   * 
+ * + * string step_id = 1; + */ + public com.google.protobuf.ByteString + getStepIdBytes() { + java.lang.Object ref = stepId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + stepId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREREQUISITE_STEP_IDS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList prerequisiteStepIds_; + /** + *
+   * Output only. Node's prerequisite nodes.
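+   * Illustrative iteration (accessors are defined on this message):
+   *   for (String stepId : node.getPrerequisiteStepIdsList()) {
+   *     // each entry names an earlier step this node waits on
+   *   }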
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public com.google.protobuf.ProtocolStringList + getPrerequisiteStepIdsList() { + return prerequisiteStepIds_; + } + /** + *
+   * Output only. Node's prerequisite nodes.
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public int getPrerequisiteStepIdsCount() { + return prerequisiteStepIds_.size(); + } + /** + *
+   * Output only. Node's prerequisite nodes.
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public java.lang.String getPrerequisiteStepIds(int index) { + return prerequisiteStepIds_.get(index); + } + /** + *
+   * Output only. Node's prerequisite nodes.
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public com.google.protobuf.ByteString + getPrerequisiteStepIdsBytes(int index) { + return prerequisiteStepIds_.getByteString(index); + } + + public static final int JOB_ID_FIELD_NUMBER = 3; + private volatile java.lang.Object jobId_; + /** + *
+   * Output only. The job ID; populated after the node enters the RUNNING state.
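+   * A minimal guard sketch (method names are from this class):
+   *   if (!node.getJobId().isEmpty()) {
+   *     // the node has entered RUNNING at least once; a job ID is available
+   *   }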
+   * 
+ * + * string job_id = 3; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } + } + /** + *
+   * Output only. The job ID; populated after the node enters the RUNNING state.
+   * 
+ * + * string job_id = 3; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_FIELD_NUMBER = 5; + private int state_; + /** + *
+   * Output only. The node state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + public int getStateValue() { + return state_; + } + /** + *
+   * Output only. The node state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState result = com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState.UNRECOGNIZED : result; + } + + public static final int ERROR_FIELD_NUMBER = 6; + private volatile java.lang.Object error_; + /** + *
+   * Output only. The error detail.
+   * 
+ * + * string error = 6; + */ + public java.lang.String getError() { + java.lang.Object ref = error_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + error_ = s; + return s; + } + } + /** + *
+   * Output only. The error detail.
+   * 
+ * + * string error = 6; + */ + public com.google.protobuf.ByteString + getErrorBytes() { + java.lang.Object ref = error_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + error_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getStepIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, stepId_); + } + for (int i = 0; i < prerequisiteStepIds_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, prerequisiteStepIds_.getRaw(i)); + } + if (!getJobIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, jobId_); + } + if (state_ != com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState.NODE_STATUS_UNSPECIFIED.getNumber()) { + output.writeEnum(5, state_); + } + if (!getErrorBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, error_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getStepIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, stepId_); + } + { + int dataSize = 0; + for (int i = 0; i < prerequisiteStepIds_.size(); i++) { + dataSize += computeStringSizeNoTag(prerequisiteStepIds_.getRaw(i)); + } + size += dataSize; + size += 1 * getPrerequisiteStepIdsList().size(); + } + if (!getJobIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, jobId_); + } + if (state_ != com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState.NODE_STATUS_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, state_); + } + if (!getErrorBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, error_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.WorkflowNode)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.WorkflowNode other = (com.google.cloud.dataproc.v1beta2.WorkflowNode) obj; + + boolean result = true; + result = result && getStepId() + .equals(other.getStepId()); + result = result && getPrerequisiteStepIdsList() + .equals(other.getPrerequisiteStepIdsList()); + result = result && getJobId() + .equals(other.getJobId()); + result = result && state_ == other.state_; + result = result && getError() + .equals(other.getError()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + STEP_ID_FIELD_NUMBER; + hash = (53 * hash) + getStepId().hashCode(); + if (getPrerequisiteStepIdsCount() > 0) { + 
hash = (37 * hash) + PREREQUISITE_STEP_IDS_FIELD_NUMBER; + hash = (53 * hash) + getPrerequisiteStepIdsList().hashCode(); + } + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + ERROR_FIELD_NUMBER; + hash = (53 * hash) + getError().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowNode parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, 
input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.WorkflowNode prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The workflow node.
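+   * A minimal builder sketch (IDs hypothetical; these fields are output-only
+   * in normal use, shown here only to illustrate the generated API):
+   *   WorkflowNode node = WorkflowNode.newBuilder()
+   *       .setStepId("step-1")
+   *       .addPrerequisiteStepIds("step-0")
+   *       .build();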
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowNode} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.WorkflowNode) + com.google.cloud.dataproc.v1beta2.WorkflowNodeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowNode.class, com.google.cloud.dataproc.v1beta2.WorkflowNode.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.WorkflowNode.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + stepId_ = ""; + + prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + jobId_ = ""; + + state_ = 0; + + error_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowNode getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowNode.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowNode build() { + com.google.cloud.dataproc.v1beta2.WorkflowNode result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowNode buildPartial() { + com.google.cloud.dataproc.v1beta2.WorkflowNode result = new com.google.cloud.dataproc.v1beta2.WorkflowNode(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.stepId_ = stepId_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + prerequisiteStepIds_ = prerequisiteStepIds_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.prerequisiteStepIds_ = prerequisiteStepIds_; + result.jobId_ = jobId_; + result.state_ = state_; + result.error_ = error_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + 
com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.WorkflowNode) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.WorkflowNode)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.WorkflowNode other) { + if (other == com.google.cloud.dataproc.v1beta2.WorkflowNode.getDefaultInstance()) return this; + if (!other.getStepId().isEmpty()) { + stepId_ = other.stepId_; + onChanged(); + } + if (!other.prerequisiteStepIds_.isEmpty()) { + if (prerequisiteStepIds_.isEmpty()) { + prerequisiteStepIds_ = other.prerequisiteStepIds_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.addAll(other.prerequisiteStepIds_); + } + onChanged(); + } + if (!other.getJobId().isEmpty()) { + jobId_ = other.jobId_; + onChanged(); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.getError().isEmpty()) { + error_ = other.error_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.WorkflowNode parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.WorkflowNode) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object stepId_ = ""; + /** + *
+     * Output only. The name of the node.
+     * 
+ * + * string step_id = 1; + */ + public java.lang.String getStepId() { + java.lang.Object ref = stepId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + stepId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The name of the node.
+     * 
+ * + * string step_id = 1; + */ + public com.google.protobuf.ByteString + getStepIdBytes() { + java.lang.Object ref = stepId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + stepId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The name of the node.
+     * 
+ * + * string step_id = 1; + */ + public Builder setStepId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + stepId_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the node.
+     * 
+ * + * string step_id = 1; + */ + public Builder clearStepId() { + + stepId_ = getDefaultInstance().getStepId(); + onChanged(); + return this; + } + /** + *
+     * Output only. The name of the node.
+     * 
+ * + * string step_id = 1; + */ + public Builder setStepIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + stepId_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensurePrerequisiteStepIdsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + prerequisiteStepIds_ = new com.google.protobuf.LazyStringArrayList(prerequisiteStepIds_); + bitField0_ |= 0x00000002; + } + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public com.google.protobuf.ProtocolStringList + getPrerequisiteStepIdsList() { + return prerequisiteStepIds_.getUnmodifiableView(); + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public int getPrerequisiteStepIdsCount() { + return prerequisiteStepIds_.size(); + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public java.lang.String getPrerequisiteStepIds(int index) { + return prerequisiteStepIds_.get(index); + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public com.google.protobuf.ByteString + getPrerequisiteStepIdsBytes(int index) { + return prerequisiteStepIds_.getByteString(index); + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public Builder setPrerequisiteStepIds( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public Builder addPrerequisiteStepIds( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.add(value); + onChanged(); + return this; + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public Builder addAllPrerequisiteStepIds( + java.lang.Iterable values) { + ensurePrerequisiteStepIdsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, prerequisiteStepIds_); + onChanged(); + return this; + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public Builder clearPrerequisiteStepIds() { + prerequisiteStepIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + *
+     * Output only. Node's prerequisite nodes.
+     * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + public Builder addPrerequisiteStepIdsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensurePrerequisiteStepIdsIsMutable(); + prerequisiteStepIds_.add(value); + onChanged(); + return this; + } + + private java.lang.Object jobId_ = ""; + /** + *
+     * Output only. The job id, populated after the node enters the RUNNING state.
+     * 
+ * + * string job_id = 3; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + jobId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The job id, populated after the node enters the RUNNING state.
+     * 
+ * + * string job_id = 3; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The job id, populated after the node enters the RUNNING state.
+     * 
+ * + * string job_id = 3; + */ + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + jobId_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The job id, populated after the node enters the RUNNING state.
+     * 
+ * + * string job_id = 3; + */ + public Builder clearJobId() { + + jobId_ = getDefaultInstance().getJobId(); + onChanged(); + return this; + } + /** + *
+     * Output only. The job id, populated after the node enters the RUNNING state.
+     * 
+ * + * string job_id = 3; + */ + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + jobId_ = value; + onChanged(); + return this; + } + + private int state_ = 0; + /** + *
+     * Output only. The node state.
+     * 
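+     * For illustration only (editor's sketch, not generated text): callers
+     * reading node states may want to handle values this client version does
+     * not know about, which getState() reports as NodeState.UNRECOGNIZED:
+     *
+     *   static void describe(com.google.cloud.dataproc.v1beta2.WorkflowNode node) {
+     *     switch (node.getState()) {
+     *       case RUNNING:
+     *         System.out.println("job " + node.getJobId() + " is running");
+     *         break;
+     *       case UNRECOGNIZED:
+     *         // A newer server sent a state unknown to this client version.
+     *         System.out.println("unknown state number: " + node.getStateValue());
+     *         break;
+     *       default:
+     *         System.out.println(node.getState().name());
+     *     }
+     *   }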
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + public int getStateValue() { + return state_; + } + /** + *
+     * Output only. The node state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + public Builder setStateValue(int value) { + state_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The node state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState result = com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState.UNRECOGNIZED : result; + } + /** + *
+     * Output only. The node state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + public Builder setState(com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState value) { + if (value == null) { + throw new NullPointerException(); + } + + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Output only. The node state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + public Builder clearState() { + + state_ = 0; + onChanged(); + return this; + } + + private java.lang.Object error_ = ""; + /** + *
+     * Output only. The error detail.
+     * 
+ * + * string error = 6; + */ + public java.lang.String getError() { + java.lang.Object ref = error_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + error_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The error detail.
+     * 
+ * + * string error = 6; + */ + public com.google.protobuf.ByteString + getErrorBytes() { + java.lang.Object ref = error_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + error_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The error detail.
+     * 
+ * + * string error = 6; + */ + public Builder setError( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + error_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The error detail.
+     * 
+ * + * string error = 6; + */ + public Builder clearError() { + + error_ = getDefaultInstance().getError(); + onChanged(); + return this; + } + /** + *
+     * Output only. The error detail.
+     * 
+ * + * string error = 6; + */ + public Builder setErrorBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + error_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.WorkflowNode) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowNode) + private static final com.google.cloud.dataproc.v1beta2.WorkflowNode DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.WorkflowNode(); + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowNode getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WorkflowNode parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WorkflowNode(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowNode getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNodeOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNodeOrBuilder.java new file mode 100644 index 000000000000..2439c79f9227 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNodeOrBuilder.java @@ -0,0 +1,115 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface WorkflowNodeOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.WorkflowNode) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Output only. The name of the node.
+   * 
+ * + * string step_id = 1; + */ + java.lang.String getStepId(); + /** + *
+   * Output only. The name of the node.
+   * 
+ * + * string step_id = 1; + */ + com.google.protobuf.ByteString + getStepIdBytes(); + + /** + *
+   * Output only. Node's prerequisite nodes.
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + java.util.List + getPrerequisiteStepIdsList(); + /** + *
+   * Output only. Node's prerequisite nodes.
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + int getPrerequisiteStepIdsCount(); + /** + *
+   * Output only. Node's prerequisite nodes.
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + java.lang.String getPrerequisiteStepIds(int index); + /** + *
+   * Output only. Node's prerequisite nodes.
+   * 
+ * + * repeated string prerequisite_step_ids = 2; + */ + com.google.protobuf.ByteString + getPrerequisiteStepIdsBytes(int index); + + /** + *
+   * Output only. The job id, populated after the node enters the RUNNING state.
+   * 
+ * + * string job_id = 3; + */ + java.lang.String getJobId(); + /** + *
+   * Output only. The job id, populated after the node enters the RUNNING state.
+   * 
+ * + * string job_id = 3; + */ + com.google.protobuf.ByteString + getJobIdBytes(); + + /** + *
+   * Output only. The node state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + int getStateValue(); + /** + *
+   * Output only. The node state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowNode.NodeState state = 5; + */ + com.google.cloud.dataproc.v1beta2.WorkflowNode.NodeState getState(); + + /** + *
+   * Output only. The error detail.
+   * 
+ * + * string error = 6; + */ + java.lang.String getError(); + /** + *
+   * Output only. The error detail.
+   * 
+ * + * string error = 6; + */ + com.google.protobuf.ByteString + getErrorBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplate.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplate.java new file mode 100644 index 000000000000..896bed458d97 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplate.java @@ -0,0 +1,2401 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A Cloud Dataproc workflow template resource.
+ * 
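+ * For illustration only (editor's sketch, not generated text): a minimal
+ * template could be assembled with the generated builders, assuming a
+ * managed-cluster placement and a single Hadoop job:
+ *
+ *   WorkflowTemplate template = WorkflowTemplate.newBuilder()
+ *       .setId("my-template")
+ *       .setPlacement(WorkflowTemplatePlacement.newBuilder()
+ *           .setManagedCluster(ManagedCluster.newBuilder()
+ *               .setClusterName("my-managed-cluster")))
+ *       .addJobs(OrderedJob.newBuilder()
+ *           .setStepId("word-count")
+ *           .setHadoopJob(HadoopJob.newBuilder()
+ *               .setMainJarFileUri("gs://my-bucket/wordcount.jar")))
+ *       .build();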
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowTemplate} + */ +public final class WorkflowTemplate extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.WorkflowTemplate) + WorkflowTemplateOrBuilder { +private static final long serialVersionUID = 0L; + // Use WorkflowTemplate.newBuilder() to construct. + private WorkflowTemplate(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private WorkflowTemplate() { + id_ = ""; + name_ = ""; + version_ = 0; + jobs_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WorkflowTemplate( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + id_ = s; + break; + } + case 24: { + + version_ = input.readInt32(); + break; + } + case 34: { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (createTime_ != null) { + subBuilder = createTime_.toBuilder(); + } + createTime_ = input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(createTime_); + createTime_ = subBuilder.buildPartial(); + } + + break; + } + case 42: { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (updateTime_ != null) { + subBuilder = updateTime_.toBuilder(); + } + updateTime_ = input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(updateTime_); + updateTime_ = subBuilder.buildPartial(); + } + + break; + } + case 50: { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000020; + } + com.google.protobuf.MapEntry + labels__ = input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + labels_.getMutableMap().put( + labels__.getKey(), labels__.getValue()); + break; + } + case 58: { + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder subBuilder = null; + if (placement_ != null) { + subBuilder = placement_.toBuilder(); + } + placement_ = input.readMessage(com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(placement_); + placement_ = subBuilder.buildPartial(); + } + + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + jobs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + jobs_.add( + input.readMessage(com.google.cloud.dataproc.v1beta2.OrderedJob.parser(), extensionRegistry)); + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = 
true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + jobs_ = java.util.Collections.unmodifiableList(jobs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 6: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.class, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder.class); + } + + private int bitField0_; + public static final int ID_FIELD_NUMBER = 2; + private volatile java.lang.Object id_; + /** + *
+   * Required. The template id.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). Cannot begin or end with underscore
+   * or hyphen. Must consist of between 3 and 50 characters.
+   * 
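+   * (Editor's note, for illustration: taken together, these rules match a
+   * pattern along the lines of [a-zA-Z0-9][a-zA-Z0-9_-]{1,48}[a-zA-Z0-9],
+   * i.e. alphanumeric first and last characters with 1 to 48 permitted
+   * characters in between, for 3 to 50 characters in total.)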
+ * + * string id = 2; + */ + public java.lang.String getId() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + id_ = s; + return s; + } + } + /** + *
+   * Required. The template id.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). Cannot begin or end with underscore
+   * or hyphen. Must consist of between 3 and 50 characters.
+   * 
+ * + * string id = 2; + */ + public com.google.protobuf.ByteString + getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + id_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + *
+   * Output only. The "resource name" of the template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
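+   * For illustration only (editor's sketch): the generated
+   * WorkflowTemplateName helper in this package can produce such a name,
+   * assuming its usual of()/toString() factory shape:
+   *
+   *   String name = WorkflowTemplateName
+   *       .of("my-project", "us-central1", "my-template")
+   *       .toString();
+   *   // projects/my-project/regions/us-central1/workflowTemplates/my-template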
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
+   * Output only. The "resource name" of the template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSION_FIELD_NUMBER = 3; + private int version_; + /** + *
+   * Optional. Used to perform a consistent read-modify-write.
+   * This field should be left blank for a `CreateWorkflowTemplate` request. It
+   * is required for an `UpdateWorkflowTemplate` request, and must match the
+   * current server version. A typical update flow is to fetch the template
+   * with a `GetWorkflowTemplate` request (which returns it with the `version`
+   * field set to the current server version), update the other fields, and
+   * send it back in the `UpdateWorkflowTemplate` request.
+   * 
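+   * For illustration only (editor's sketch): the read-modify-write cycle,
+   * assuming a hypothetical `client` wrapping the workflow template service:
+   *
+   *   WorkflowTemplate current = client.getWorkflowTemplate(name);
+   *   WorkflowTemplate updated = current.toBuilder()
+   *       .putLabels("owner", "data-team") // change whatever fields you need
+   *       .build();                        // `version` is carried over as-is
+   *   client.updateWorkflowTemplate(updated);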
+ * + * int32 version = 3; + */ + public int getVersion() { + return version_; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp createTime_; + /** + *
+   * Output only. The time the template was created.
+   * 
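+   * (Editor's note, for illustration: this is a `google.protobuf.Timestamp`;
+   * the protobuf-java-util `Timestamps` helper can convert it if needed, e.g.
+   * `long millis = Timestamps.toMillis(template.getCreateTime());`.)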
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public boolean hasCreateTime() { + return createTime_ != null; + } + /** + *
+   * Output only. The time the template was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + /** + *
+   * Output only. The time the template was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return getCreateTime(); + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp updateTime_; + /** + *
+   * Output only. The time the template was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public boolean hasUpdateTime() { + return updateTime_ != null; + } + /** + *
+   * Output only. The time the template was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + /** + *
+   * Output only. The time the template was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return getUpdateTime(); + } + + public static final int LABELS_FIELD_NUMBER = 6; + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry< + java.lang.String, java.lang.String> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
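+   * For illustration only (editor's sketch): labels are exposed as a plain
+   * string-to-string map on the generated builder:
+   *
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder()
+   *       .setId("my-template")
+   *       .putLabels("env", "prod")
+   *       .putLabels("team", "analytics")
+   *       .build();
+   *   String env = template.getLabelsOrDefault("env", "unknown");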
+ * + * map<string, string> labels = 6; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int PLACEMENT_FIELD_NUMBER = 7; + private com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement_; + /** + *
+   * Required. WorkflowTemplate scheduling information.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public boolean hasPlacement() { + return placement_ != null; + } + /** + *
+   * Required. WorkflowTemplate scheduling information.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement getPlacement() { + return placement_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.getDefaultInstance() : placement_; + } + /** + *
+   * Required. WorkflowTemplate scheduling information.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacementOrBuilder getPlacementOrBuilder() { + return getPlacement(); + } + + public static final int JOBS_FIELD_NUMBER = 8; + private java.util.List jobs_; + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
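+   * For illustration only (editor's sketch): the DAG is expressed through
+   * each job's step_id and prerequisite_step_ids, assuming the usual
+   * generated OrderedJob setters:
+   *
+   *   templateBuilder
+   *       .addJobs(OrderedJob.newBuilder()
+   *           .setStepId("ingest"))
+   *       .addJobs(OrderedJob.newBuilder()
+   *           .setStepId("transform")
+   *           .addPrerequisiteStepIds("ingest")); // runs after "ingest"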
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public java.util.List getJobsList() { + return jobs_; + } + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public java.util.List + getJobsOrBuilderList() { + return jobs_; + } + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public int getJobsCount() { + return jobs_.size(); + } + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public com.google.cloud.dataproc.v1beta2.OrderedJob getJobs(int index) { + return jobs_.get(index); + } + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public com.google.cloud.dataproc.v1beta2.OrderedJobOrBuilder getJobsOrBuilder( + int index) { + return jobs_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!getIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, id_); + } + if (version_ != 0) { + output.writeInt32(3, version_); + } + if (createTime_ != null) { + output.writeMessage(4, getCreateTime()); + } + if (updateTime_ != null) { + output.writeMessage(5, getUpdateTime()); + } + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetLabels(), + LabelsDefaultEntryHolder.defaultEntry, + 6); + if (placement_ != null) { + output.writeMessage(7, getPlacement()); + } + for (int i = 0; i < jobs_.size(); i++) { + output.writeMessage(8, jobs_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!getIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, id_); + } + if (version_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, version_); + } + if (createTime_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getCreateTime()); + } + if (updateTime_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, getUpdateTime()); + } + for (java.util.Map.Entry entry + : internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry + labels__ = LabelsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, labels__); + } + if (placement_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, getPlacement()); + } + for (int i = 0; i < jobs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, jobs_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.WorkflowTemplate)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.WorkflowTemplate other = (com.google.cloud.dataproc.v1beta2.WorkflowTemplate) obj; + + boolean result = true; + result = result && getId() + .equals(other.getId()); + result = result && getName() + .equals(other.getName()); + result = result && (getVersion() + == other.getVersion()); + result = result && (hasCreateTime() == other.hasCreateTime()); + if (hasCreateTime()) { + result = result && getCreateTime() + .equals(other.getCreateTime()); + } + result = result && (hasUpdateTime() == 
other.hasUpdateTime()); + if (hasUpdateTime()) { + result = result && getUpdateTime() + .equals(other.getUpdateTime()); + } + result = result && internalGetLabels().equals( + other.internalGetLabels()); + result = result && (hasPlacement() == other.hasPlacement()); + if (hasPlacement()) { + result = result && getPlacement() + .equals(other.getPlacement()); + } + result = result && getJobsList() + .equals(other.getJobsList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (hasPlacement()) { + hash = (37 * hash) + PLACEMENT_FIELD_NUMBER; + hash = (53 * hash) + getPlacement().hashCode(); + } + if (getJobsCount() > 0) { + hash = (37 * hash) + JOBS_FIELD_NUMBER; + hash = (53 * hash) + getJobsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.WorkflowTemplate prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A Cloud Dataproc workflow template resource.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowTemplate} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.WorkflowTemplate) + com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 6: + return internalGetLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 6: + return internalGetMutableLabels(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowTemplate.class, com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.WorkflowTemplate.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getJobsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + id_ = ""; + + name_ = ""; + + version_ = 0; + + if (createTimeBuilder_ == null) { + createTime_ = null; + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + if (updateTimeBuilder_ == null) { + updateTime_ = null; + } else { + updateTime_ = null; + updateTimeBuilder_ = null; + } + internalGetMutableLabels().clear(); + if (placementBuilder_ == null) { + placement_ = null; + } else { + placement_ = null; + placementBuilder_ = null; + } + if (jobsBuilder_ == null) { + jobs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + jobsBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate build() { + com.google.cloud.dataproc.v1beta2.WorkflowTemplate result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate buildPartial() { + 
com.google.cloud.dataproc.v1beta2.WorkflowTemplate result = new com.google.cloud.dataproc.v1beta2.WorkflowTemplate(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + result.id_ = id_; + result.name_ = name_; + result.version_ = version_; + if (createTimeBuilder_ == null) { + result.createTime_ = createTime_; + } else { + result.createTime_ = createTimeBuilder_.build(); + } + if (updateTimeBuilder_ == null) { + result.updateTime_ = updateTime_; + } else { + result.updateTime_ = updateTimeBuilder_.build(); + } + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + if (placementBuilder_ == null) { + result.placement_ = placement_; + } else { + result.placement_ = placementBuilder_.build(); + } + if (jobsBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + jobs_ = java.util.Collections.unmodifiableList(jobs_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.jobs_ = jobs_; + } else { + result.jobs_ = jobsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.WorkflowTemplate) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.WorkflowTemplate)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.WorkflowTemplate other) { + if (other == com.google.cloud.dataproc.v1beta2.WorkflowTemplate.getDefaultInstance()) return this; + if (!other.getId().isEmpty()) { + id_ = other.id_; + onChanged(); + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getVersion() != 0) { + setVersion(other.getVersion()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + internalGetMutableLabels().mergeFrom( + other.internalGetLabels()); + if (other.hasPlacement()) { + mergePlacement(other.getPlacement()); + } + if (jobsBuilder_ == null) { + if (!other.jobs_.isEmpty()) { + if (jobs_.isEmpty()) { + jobs_ = other.jobs_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureJobsIsMutable(); + jobs_.addAll(other.jobs_); + } + onChanged(); + } + } else { + if (!other.jobs_.isEmpty()) { + if (jobsBuilder_.isEmpty()) { + jobsBuilder_.dispose(); + jobsBuilder_ = null; + jobs_ = other.jobs_; + bitField0_ = (bitField0_ & ~0x00000080); + jobsBuilder_ = + 
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getJobsFieldBuilder() : null; + } else { + jobsBuilder_.addAllMessages(other.jobs_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.WorkflowTemplate parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.WorkflowTemplate) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object id_ = ""; + /** + *
+     * Required. The template id.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). Cannot begin or end with underscore
+     * or hyphen. Must consist of between 3 and 50 characters.
+     * 
+ * + * string id = 2; + */ + public java.lang.String getId() { + java.lang.Object ref = id_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + id_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The template id.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). Cannot begin or end with underscore
+     * or hyphen. Must consist of between 3 and 50 characters.
+     * 
+ * + * string id = 2; + */ + public com.google.protobuf.ByteString + getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + id_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The template id.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). Cannot begin or end with underscore
+     * or hyphen. Must consist of between 3 and 50 characters.
+     * 
+ * + * string id = 2; + */ + public Builder setId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + id_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The template id.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). Cannot begin or end with underscore
+     * or hyphen. Must consist of between 3 and 50 characters.
+     * 
+ * + * string id = 2; + */ + public Builder clearId() { + + id_ = getDefaultInstance().getId(); + onChanged(); + return this; + } + /** + *
+     * Required. The template id.
+     * The id must contain only letters (a-z, A-Z), numbers (0-9),
+     * underscores (_), and hyphens (-). Cannot begin or end with underscore
+     * or hyphen. Must consist of between 3 and 50 characters.
+     * 
+ * + * string id = 2; + */ + public Builder setIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + id_ = value; + onChanged(); + return this; + } + + private java.lang.Object name_ = ""; + /** + *
+     * Output only. The "resource name" of the template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Output only. The "resource name" of the template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Output only. The "resource name" of the template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
+     * Output only. The "resource name" of the template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
+     * Output only. The "resource name" of the template, as described
+     * in https://cloud.google.com/apis/design/resource_names, of the form
+     * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+     * 
+ * + * string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int version_ ; + /** + *
+     * Optional. Used to perform a consistent read-modify-write.
+     * This field should be left blank for a `CreateWorkflowTemplate` request. It
+     * is required for an `UpdateWorkflowTemplate` request, and must match the
+     * current server version. A typical update flow is to fetch the template
+     * with a `GetWorkflowTemplate` request (which returns it with the `version`
+     * field set to the current server version), update the other fields, and
+     * send it back in the `UpdateWorkflowTemplate` request.
+     * 
+ * + * int32 version = 3; + */ + public int getVersion() { + return version_; + } + /** + *
+     * Optional. Used to perform a consistent read-modify-write.
+     * This field should be left blank for a `CreateWorkflowTemplate` request. It
+     * is required for an `UpdateWorkflowTemplate` request, and must match the
+     * current server version. A typical update flow is to fetch the template
+     * with a `GetWorkflowTemplate` request (which returns it with the `version`
+     * field set to the current server version), update the other fields, and
+     * send it back in the `UpdateWorkflowTemplate` request.
+     * 
+ * + * int32 version = 3; + */ + public Builder setVersion(int value) { + + version_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. Used to perform a consistent read-modify-write.
+     * This field should be left blank for a `CreateWorkflowTemplate` request. It
+     * is required for an `UpdateWorkflowTemplate` request, and must match the
+     * current server version. A typical update flow is to fetch the template
+     * with a `GetWorkflowTemplate` request (which returns it with the `version`
+     * field set to the current server version), update the other fields, and
+     * send it back in the `UpdateWorkflowTemplate` request.
+     * 
+ * + * int32 version = 3; + */ + public Builder clearVersion() { + + version_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> createTimeBuilder_; + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public boolean hasCreateTime() { + return createTimeBuilder_ != null || createTime_ != null; + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + onChanged(); + } else { + createTimeBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public Builder setCreateTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + onChanged(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (createTime_ != null) { + createTime_ = + com.google.protobuf.Timestamp.newBuilder(createTime_).mergeFrom(value).buildPartial(); + } else { + createTime_ = value; + } + onChanged(); + } else { + createTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public Builder clearCreateTime() { + if (createTimeBuilder_ == null) { + createTime_ = null; + onChanged(); + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null ? + com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + } + /** + *
+     * Output only. The time the template was created.
+     * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), + getParentForChildren(), + isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> updateTimeBuilder_; + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public boolean hasUpdateTime() { + return updateTimeBuilder_ != null || updateTime_ != null; + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + onChanged(); + } else { + updateTimeBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public Builder setUpdateTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + onChanged(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (updateTime_ != null) { + updateTime_ = + com.google.protobuf.Timestamp.newBuilder(updateTime_).mergeFrom(value).buildPartial(); + } else { + updateTime_ = value; + } + onChanged(); + } else { + updateTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public Builder clearUpdateTime() { + if (updateTimeBuilder_ == null) { + updateTime_ = null; + onChanged(); + } else { + updateTime_ = null; + updateTimeBuilder_ = null; + } + + return this; + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + + onChanged(); + return getUpdateTimeFieldBuilder().getBuilder(); + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null ? + com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + } + /** + *
+     * Output only. The time the template was last updated.
+     * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> + getUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), + getParentForChildren(), + isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private com.google.protobuf.MapField< + java.lang.String, java.lang.String> labels_; + private com.google.protobuf.MapField + internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + private com.google.protobuf.MapField + internalGetMutableLabels() { + onChanged();; + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField( + LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + /** + *
+     * Optional. The labels to associate with this template. These labels
+     * will be propagated to all jobs and clusters created by the workflow
+     * instance.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a template.
+     * 
+ * + * map<string, string> labels = 6; + */ + + public boolean containsLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetLabels().getMap().containsKey(key); + } + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + /** + *
+     * Optional. The labels to associate with this template. These labels
+     * will be propagated to all jobs and clusters created by the workflow
+     * instance.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a template.
+     * 
+ * + * map<string, string> labels = 6; + */ + + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + /** + *
+     * Optional. The labels to associate with this template. These labels
+     * will be propagated to all jobs and clusters created by the workflow
+     * instance.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a template.
+     * 
+ * + * map<string, string> labels = 6; + */ + + public java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + *
+     * Optional. The labels to associate with this template. These labels
+     * will be propagated to all jobs and clusters created by the workflow
+     * instance.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a template.
+     * 
+ * + * map<string, string> labels = 6; + */ + + public java.lang.String getLabelsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + internalGetMutableLabels().getMutableMap() + .clear(); + return this; + } + /** + *
+     * Optional. The labels to associate with this template. These labels
+     * will be propagated to all jobs and clusters created by the workflow
+     * instance.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a template.
+     * 
+ * + * map<string, string> labels = 6; + */ + + public Builder removeLabels( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutableLabels() { + return internalGetMutableLabels().getMutableMap(); + } + /** + *
+     * Optional. The labels to associate with this template. These labels
+     * will be propagated to all jobs and clusters created by the workflow
+     * instance.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a template.
+     * 
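+     * A hypothetical usage sketch (label keys and values assumed):
+     *   builder.putLabels("environment", "staging")
+     *          .putLabels("team", "data-eng"); // at most 32 labels in total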
+ * + * map<string, string> labels = 6; + */ + public Builder putLabels( + java.lang.String key, + java.lang.String value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutableLabels().getMutableMap() + .put(key, value); + return this; + } + /** + *
+     * Optional. The labels to associate with this template. These labels
+     * will be propagated to all jobs and clusters created by the workflow
+     * instance.
+     * Label **keys** must contain 1 to 63 characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * Label **values** may be empty, but, if present, must contain 1 to 63
+     * characters, and must conform to
+     * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+     * No more than 32 labels can be associated with a template.
+     * 
+ * + * map<string, string> labels = 6; + */ + + public Builder putAllLabels( + java.util.Map values) { + internalGetMutableLabels().getMutableMap() + .putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement, com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacementOrBuilder> placementBuilder_; + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public boolean hasPlacement() { + return placementBuilder_ != null || placement_ != null; + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement getPlacement() { + if (placementBuilder_ == null) { + return placement_ == null ? com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.getDefaultInstance() : placement_; + } else { + return placementBuilder_.getMessage(); + } + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public Builder setPlacement(com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement value) { + if (placementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + placement_ = value; + onChanged(); + } else { + placementBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public Builder setPlacement( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder builderForValue) { + if (placementBuilder_ == null) { + placement_ = builderForValue.build(); + onChanged(); + } else { + placementBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public Builder mergePlacement(com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement value) { + if (placementBuilder_ == null) { + if (placement_ != null) { + placement_ = + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.newBuilder(placement_).mergeFrom(value).buildPartial(); + } else { + placement_ = value; + } + onChanged(); + } else { + placementBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public Builder clearPlacement() { + if (placementBuilder_ == null) { + placement_ = null; + onChanged(); + } else { + placement_ = null; + placementBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder getPlacementBuilder() { + + onChanged(); + return getPlacementFieldBuilder().getBuilder(); + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacementOrBuilder getPlacementOrBuilder() { + if (placementBuilder_ != null) { + return placementBuilder_.getMessageOrBuilder(); + } else { + return placement_ == null ? + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.getDefaultInstance() : placement_; + } + } + /** + *
+     * Required. WorkflowTemplate scheduling information.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement, com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacementOrBuilder> + getPlacementFieldBuilder() { + if (placementBuilder_ == null) { + placementBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement, com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder, com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacementOrBuilder>( + getPlacement(), + getParentForChildren(), + isClean()); + placement_ = null; + } + return placementBuilder_; + } + + private java.util.List jobs_ = + java.util.Collections.emptyList(); + private void ensureJobsIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + jobs_ = new java.util.ArrayList(jobs_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.OrderedJob, com.google.cloud.dataproc.v1beta2.OrderedJob.Builder, com.google.cloud.dataproc.v1beta2.OrderedJobOrBuilder> jobsBuilder_; + + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public java.util.List getJobsList() { + if (jobsBuilder_ == null) { + return java.util.Collections.unmodifiableList(jobs_); + } else { + return jobsBuilder_.getMessageList(); + } + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public int getJobsCount() { + if (jobsBuilder_ == null) { + return jobs_.size(); + } else { + return jobsBuilder_.getCount(); + } + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public com.google.cloud.dataproc.v1beta2.OrderedJob getJobs(int index) { + if (jobsBuilder_ == null) { + return jobs_.get(index); + } else { + return jobsBuilder_.getMessage(index); + } + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder setJobs( + int index, com.google.cloud.dataproc.v1beta2.OrderedJob value) { + if (jobsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureJobsIsMutable(); + jobs_.set(index, value); + onChanged(); + } else { + jobsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder setJobs( + int index, com.google.cloud.dataproc.v1beta2.OrderedJob.Builder builderForValue) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.set(index, builderForValue.build()); + onChanged(); + } else { + jobsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
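+     * For illustration (step ids assumed; see OrderedJob in this package,
+     * whose `prerequisite_step_ids` express the DAG edges):
+     *   builder.addJobs(OrderedJob.newBuilder().setStepId("ingest"))
+     *          .addJobs(OrderedJob.newBuilder()
+     *              .setStepId("transform")
+     *              .addPrerequisiteStepIds("ingest")); // runs after "ingest"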
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder addJobs(com.google.cloud.dataproc.v1beta2.OrderedJob value) { + if (jobsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureJobsIsMutable(); + jobs_.add(value); + onChanged(); + } else { + jobsBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder addJobs( + int index, com.google.cloud.dataproc.v1beta2.OrderedJob value) { + if (jobsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureJobsIsMutable(); + jobs_.add(index, value); + onChanged(); + } else { + jobsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder addJobs( + com.google.cloud.dataproc.v1beta2.OrderedJob.Builder builderForValue) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.add(builderForValue.build()); + onChanged(); + } else { + jobsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder addJobs( + int index, com.google.cloud.dataproc.v1beta2.OrderedJob.Builder builderForValue) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.add(index, builderForValue.build()); + onChanged(); + } else { + jobsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder addAllJobs( + java.lang.Iterable values) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, jobs_); + onChanged(); + } else { + jobsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder clearJobs() { + if (jobsBuilder_ == null) { + jobs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + jobsBuilder_.clear(); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public Builder removeJobs(int index) { + if (jobsBuilder_ == null) { + ensureJobsIsMutable(); + jobs_.remove(index); + onChanged(); + } else { + jobsBuilder_.remove(index); + } + return this; + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public com.google.cloud.dataproc.v1beta2.OrderedJob.Builder getJobsBuilder( + int index) { + return getJobsFieldBuilder().getBuilder(index); + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public com.google.cloud.dataproc.v1beta2.OrderedJobOrBuilder getJobsOrBuilder( + int index) { + if (jobsBuilder_ == null) { + return jobs_.get(index); } else { + return jobsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public java.util.List + getJobsOrBuilderList() { + if (jobsBuilder_ != null) { + return jobsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(jobs_); + } + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public com.google.cloud.dataproc.v1beta2.OrderedJob.Builder addJobsBuilder() { + return getJobsFieldBuilder().addBuilder( + com.google.cloud.dataproc.v1beta2.OrderedJob.getDefaultInstance()); + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public com.google.cloud.dataproc.v1beta2.OrderedJob.Builder addJobsBuilder( + int index) { + return getJobsFieldBuilder().addBuilder( + index, com.google.cloud.dataproc.v1beta2.OrderedJob.getDefaultInstance()); + } + /** + *
+     * Required. The Directed Acyclic Graph of Jobs to submit.
+     * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + public java.util.List + getJobsBuilderList() { + return getJobsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.OrderedJob, com.google.cloud.dataproc.v1beta2.OrderedJob.Builder, com.google.cloud.dataproc.v1beta2.OrderedJobOrBuilder> + getJobsFieldBuilder() { + if (jobsBuilder_ == null) { + jobsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.OrderedJob, com.google.cloud.dataproc.v1beta2.OrderedJob.Builder, com.google.cloud.dataproc.v1beta2.OrderedJobOrBuilder>( + jobs_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + jobs_ = null; + } + return jobsBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.WorkflowTemplate) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowTemplate) + private static final com.google.cloud.dataproc.v1beta2.WorkflowTemplate DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.WorkflowTemplate(); + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplate getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WorkflowTemplate parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WorkflowTemplate(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateName.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateName.java new file mode 100644 index 000000000000..c45da990a33a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateName.java @@ -0,0 +1,212 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.google.cloud.dataproc.v1beta2; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import java.util.Map; +import java.util.ArrayList; +import java.util.List; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +@javax.annotation.Generated("by GAPIC protoc plugin") +public class WorkflowTemplateName implements ResourceName { + + private static final PathTemplate PATH_TEMPLATE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/regions/{region}/workflowTemplates/{workflow_template}"); + + private volatile Map fieldValuesMap; + + private final String project; + private final String region; + private final String workflowTemplate; + + public String getProject() { + return project; + } + + public String getRegion() { + return region; + } + + public String getWorkflowTemplate() { + return workflowTemplate; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + private WorkflowTemplateName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + region = Preconditions.checkNotNull(builder.getRegion()); + workflowTemplate = Preconditions.checkNotNull(builder.getWorkflowTemplate()); + } + + public static WorkflowTemplateName of(String project, String region, String workflowTemplate) { + return newBuilder() + .setProject(project) + .setRegion(region) + .setWorkflowTemplate(workflowTemplate) + .build(); + } + + public static String format(String project, String region, String workflowTemplate) { + return newBuilder() + .setProject(project) + .setRegion(region) + .setWorkflowTemplate(workflowTemplate) + .build() + .toString(); + } + + public static WorkflowTemplateName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PATH_TEMPLATE.validatedMatch(formattedString, "WorkflowTemplateName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("region"), matchMap.get("workflow_template")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList(values.size()); + for (WorkflowTemplateName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PATH_TEMPLATE.matches(formattedString); + } + + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + fieldMapBuilder.put("project", project); + fieldMapBuilder.put("region", region); + fieldMapBuilder.put("workflowTemplate", workflowTemplate); + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PATH_TEMPLATE.instantiate("project", project, "region", region, "workflow_template", workflowTemplate); + } + + /** Builder for WorkflowTemplateName. 
*/ + public static class Builder { + + private String project; + private String region; + private String workflowTemplate; + + public String getProject() { + return project; + } + + public String getRegion() { + return region; + } + + public String getWorkflowTemplate() { + return workflowTemplate; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setRegion(String region) { + this.region = region; + return this; + } + + public Builder setWorkflowTemplate(String workflowTemplate) { + this.workflowTemplate = workflowTemplate; + return this; + } + + private Builder() { + } + + private Builder(WorkflowTemplateName workflowTemplateName) { + project = workflowTemplateName.project; + region = workflowTemplateName.region; + workflowTemplate = workflowTemplateName.workflowTemplate; + } + + public WorkflowTemplateName build() { + return new WorkflowTemplateName(this); + } + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o instanceof WorkflowTemplateName) { + WorkflowTemplateName that = (WorkflowTemplateName) o; + return (this.project.equals(that.project)) + && (this.region.equals(that.region)) + && (this.workflowTemplate.equals(that.workflowTemplate)); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= project.hashCode(); + h *= 1000003; + h ^= region.hashCode(); + h *= 1000003; + h ^= workflowTemplate.hashCode(); + return h; + } +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateOrBuilder.java new file mode 100644 index 000000000000..894e69ae91bc --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateOrBuilder.java @@ -0,0 +1,284 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface WorkflowTemplateOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.WorkflowTemplate) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The template id.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). Cannot begin or end with underscore
+   * or hyphen. Must consist of between 3 and 50 characters.
+   * 
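+   * As an editorial sketch, the constraint corresponds to a check such as:
+   *   boolean valid = id.matches("[a-zA-Z0-9][a-zA-Z0-9_-]{1,48}[a-zA-Z0-9]");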
+ * + * string id = 2; + */ + java.lang.String getId(); + /** + *
+   * Required. The template id.
+   * The id must contain only letters (a-z, A-Z), numbers (0-9),
+   * underscores (_), and hyphens (-). Cannot begin or end with underscore
+   * or hyphen. Must consist of between 3 and 50 characters.
+   * 
+ * + * string id = 2; + */ + com.google.protobuf.ByteString + getIdBytes(); + + /** + *
+   * Output only. The "resource name" of the template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
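+   * For illustration, such a name can be built or parsed with the
+   * WorkflowTemplateName helper in this package (identifier values assumed):
+   *   WorkflowTemplateName n =
+   *       WorkflowTemplateName.of("my-project", "us-central1", "my-template");
+   *   // n.toString() yields
+   *   // "projects/my-project/regions/us-central1/workflowTemplates/my-template"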
+ * + * string name = 1; + */ + java.lang.String getName(); + /** + *
+   * Output only. The "resource name" of the template, as described
+   * in https://cloud.google.com/apis/design/resource_names, of the form
+   * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+   * 
+ * + * string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
+   * Optional. Used to perform a consistent read-modify-write.
+   * This field should be left blank for a `CreateWorkflowTemplate` request. It
+   * is required for an `UpdateWorkflowTemplate` request, and must match the
+   * current server version. A typical update template flow would fetch the
+   * current template with a `GetWorkflowTemplate` request, which will return
+   * the current template with the `version` field filled in with the
+   * current server version. The user updates other fields in the template,
+   * then returns it as part of the `UpdateWorkflowTemplate` request.
+   * 
+ * + * int32 version = 3; + */ + int getVersion(); + + /** + *
+   * Output only. The time the template was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + boolean hasCreateTime(); + /** + *
+   * Output only. The time the template was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + com.google.protobuf.Timestamp getCreateTime(); + /** + *
+   * Output only. The time the template was created.
+   * 
+ * + * .google.protobuf.Timestamp create_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + *
+   * Output only. The time the template was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + boolean hasUpdateTime(); + /** + *
+   * Output only. The time the template was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + com.google.protobuf.Timestamp getUpdateTime(); + /** + *
+   * Output only. The time the template was last updated.
+   * 
+ * + * .google.protobuf.Timestamp update_time = 5; + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + int getLabelsCount(); + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + boolean containsLabels( + java.lang.String key); + /** + * Use {@link #getLabelsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getLabels(); + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + java.util.Map + getLabelsMap(); + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + + java.lang.String getLabelsOrDefault( + java.lang.String key, + java.lang.String defaultValue); + /** + *
+   * Optional. The labels to associate with this template. These labels
+   * will be propagated to all jobs and clusters created by the workflow
+   * instance.
+   * Label **keys** must contain 1 to 63 characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * Label **values** may be empty, but, if present, must contain 1 to 63
+   * characters, and must conform to
+   * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+   * No more than 32 labels can be associated with a template.
+   * 
+ * + * map<string, string> labels = 6; + */ + + java.lang.String getLabelsOrThrow( + java.lang.String key); + + /** + *
+   * Required. WorkflowTemplate scheduling information.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + boolean hasPlacement(); + /** + *
+   * Required. WorkflowTemplate scheduling information.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement getPlacement(); + /** + *
+   * Required. WorkflowTemplate scheduling information.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement placement = 7; + */ + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacementOrBuilder getPlacementOrBuilder(); + + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + java.util.List + getJobsList(); + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + com.google.cloud.dataproc.v1beta2.OrderedJob getJobs(int index); + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + int getJobsCount(); + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + java.util.List + getJobsOrBuilderList(); + /** + *
+   * Required. The Directed Acyclic Graph of Jobs to submit.
+   * 
+ * + * repeated .google.cloud.dataproc.v1beta2.OrderedJob jobs = 8; + */ + com.google.cloud.dataproc.v1beta2.OrderedJobOrBuilder getJobsOrBuilder( + int index); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacement.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacement.java new file mode 100644 index 000000000000..da0a133d732a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacement.java @@ -0,0 +1,1020 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * Specifies the workflow execution target.
+ * Either `managed_cluster` or `cluster_selector` is required.
+ * 
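+ * A minimal sketch, assuming a managed cluster is wanted (field values
+ * assumed; setting `cluster_selector` instead would clear `managed_cluster`,
+ * since the two share a oneof):
+ *   WorkflowTemplatePlacement placement = WorkflowTemplatePlacement.newBuilder()
+ *       .setManagedCluster(ManagedCluster.newBuilder()
+ *           .setClusterName("workflow-cluster"))
+ *       .build();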
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement} + */ +public final class WorkflowTemplatePlacement extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) + WorkflowTemplatePlacementOrBuilder { +private static final long serialVersionUID = 0L; + // Use WorkflowTemplatePlacement.newBuilder() to construct. + private WorkflowTemplatePlacement(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private WorkflowTemplatePlacement() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WorkflowTemplatePlacement( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder subBuilder = null; + if (placementCase_ == 1) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_).toBuilder(); + } + placement_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.ManagedCluster.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_); + placement_ = subBuilder.buildPartial(); + } + placementCase_ = 1; + break; + } + case 18: { + com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder subBuilder = null; + if (placementCase_ == 2) { + subBuilder = ((com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_).toBuilder(); + } + placement_ = + input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterSelector.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_); + placement_ = subBuilder.buildPartial(); + } + placementCase_ = 2; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.class, 
com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder.class); + } + + private int placementCase_ = 0; + private java.lang.Object placement_; + public enum PlacementCase + implements com.google.protobuf.Internal.EnumLite { + MANAGED_CLUSTER(1), + CLUSTER_SELECTOR(2), + PLACEMENT_NOT_SET(0); + private final int value; + private PlacementCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static PlacementCase valueOf(int value) { + return forNumber(value); + } + + public static PlacementCase forNumber(int value) { + switch (value) { + case 1: return MANAGED_CLUSTER; + case 2: return CLUSTER_SELECTOR; + case 0: return PLACEMENT_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public PlacementCase + getPlacementCase() { + return PlacementCase.forNumber( + placementCase_); + } + + public static final int MANAGED_CLUSTER_FIELD_NUMBER = 1; + /** + *
+   * Optional. A cluster that is managed by the workflow.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public boolean hasManagedCluster() { + return placementCase_ == 1; + } + /** + *
+   * Optional. A cluster that is managed by the workflow.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public com.google.cloud.dataproc.v1beta2.ManagedCluster getManagedCluster() { + if (placementCase_ == 1) { + return (com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_; + } + return com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance(); + } + /** + *
+   * Optional. A cluster that is managed by the workflow.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public com.google.cloud.dataproc.v1beta2.ManagedClusterOrBuilder getManagedClusterOrBuilder() { + if (placementCase_ == 1) { + return (com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_; + } + return com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance(); + } + + public static final int CLUSTER_SELECTOR_FIELD_NUMBER = 2; + /** + *
+   * Optional. A selector that chooses the target cluster for jobs based
+   * on metadata.
+   * The selector is evaluated at the time each job is submitted.
+   * 
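+   * A hypothetical sketch (label values assumed; `cluster_labels` is the
+   * ClusterSelector map matched against cluster labels at submit time):
+   *   ClusterSelector selector = ClusterSelector.newBuilder()
+   *       .putClusterLabels("env", "staging")
+   *       .build();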
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public boolean hasClusterSelector() { + return placementCase_ == 2; + } + /** + *
+   * Optional. A selector that chooses the target cluster for jobs based
+   * on metadata.
+   * The selector is evaluated at the time each job is submitted.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public com.google.cloud.dataproc.v1beta2.ClusterSelector getClusterSelector() { + if (placementCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_; + } + return com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance(); + } + /** + *
+   * Optional. A selector that chooses the target cluster for jobs based
+   * on metadata.
+   * The selector is evaluated at the time each job is submitted.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public com.google.cloud.dataproc.v1beta2.ClusterSelectorOrBuilder getClusterSelectorOrBuilder() { + if (placementCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_; + } + return com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (placementCase_ == 1) { + output.writeMessage(1, (com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_); + } + if (placementCase_ == 2) { + output.writeMessage(2, (com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (placementCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, (com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_); + } + if (placementCase_ == 2) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, (com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement other = (com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) obj; + + boolean result = true; + result = result && getPlacementCase().equals( + other.getPlacementCase()); + if (!result) return false; + switch (placementCase_) { + case 1: + result = result && getManagedCluster() + .equals(other.getManagedCluster()); + break; + case 2: + result = result && getClusterSelector() + .equals(other.getClusterSelector()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (placementCase_) { + case 1: + hash = (37 * hash) + MANAGED_CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + getManagedCluster().hashCode(); + break; + case 2: + hash = (37 * hash) + CLUSTER_SELECTOR_FIELD_NUMBER; + hash = (53 * hash) + getClusterSelector().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); 
+ } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Specifies the workflow execution target.
+   * Either `managed_cluster` or `cluster_selector` is required.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacementOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.class, com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + placementCase_ = 0; + placement_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatesProto.internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement build() { + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement buildPartial() { + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement result = new com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement(this); + if (placementCase_ == 1) { + if (managedClusterBuilder_ == null) { + result.placement_ = placement_; + } else { + result.placement_ = managedClusterBuilder_.build(); + } + } + if (placementCase_ == 2) { + if (clusterSelectorBuilder_ == null) { + result.placement_ = placement_; + } else { + result.placement_ = clusterSelectorBuilder_.build(); + } + } + result.placementCase_ = placementCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + 
@java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement other) { + if (other == com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.getDefaultInstance()) return this; + switch (other.getPlacementCase()) { + case MANAGED_CLUSTER: { + mergeManagedCluster(other.getManagedCluster()); + break; + } + case CLUSTER_SELECTOR: { + mergeClusterSelector(other.getClusterSelector()); + break; + } + case PLACEMENT_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int placementCase_ = 0; + private java.lang.Object placement_; + public PlacementCase + getPlacementCase() { + return PlacementCase.forNumber( + placementCase_); + } + + public Builder clearPlacement() { + placementCase_ = 0; + placement_ = null; + onChanged(); + return this; + } + + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ManagedCluster, com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder, com.google.cloud.dataproc.v1beta2.ManagedClusterOrBuilder> managedClusterBuilder_; + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public boolean hasManagedCluster() { + return placementCase_ == 1; + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public com.google.cloud.dataproc.v1beta2.ManagedCluster getManagedCluster() { + if (managedClusterBuilder_ == null) { + if (placementCase_ == 1) { + return (com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_; + } + return com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance(); + } else { + if (placementCase_ == 1) { + return managedClusterBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance(); + } + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public Builder setManagedCluster(com.google.cloud.dataproc.v1beta2.ManagedCluster value) { + if (managedClusterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + placement_ = value; + onChanged(); + } else { + managedClusterBuilder_.setMessage(value); + } + placementCase_ = 1; + return this; + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public Builder setManagedCluster( + com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder builderForValue) { + if (managedClusterBuilder_ == null) { + placement_ = builderForValue.build(); + onChanged(); + } else { + managedClusterBuilder_.setMessage(builderForValue.build()); + } + placementCase_ = 1; + return this; + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public Builder mergeManagedCluster(com.google.cloud.dataproc.v1beta2.ManagedCluster value) { + if (managedClusterBuilder_ == null) { + if (placementCase_ == 1 && + placement_ != com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance()) { + placement_ = com.google.cloud.dataproc.v1beta2.ManagedCluster.newBuilder((com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_) + .mergeFrom(value).buildPartial(); + } else { + placement_ = value; + } + onChanged(); + } else { + if (placementCase_ == 1) { + managedClusterBuilder_.mergeFrom(value); + } + managedClusterBuilder_.setMessage(value); + } + placementCase_ = 1; + return this; + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public Builder clearManagedCluster() { + if (managedClusterBuilder_ == null) { + if (placementCase_ == 1) { + placementCase_ = 0; + placement_ = null; + onChanged(); + } + } else { + if (placementCase_ == 1) { + placementCase_ = 0; + placement_ = null; + } + managedClusterBuilder_.clear(); + } + return this; + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder getManagedClusterBuilder() { + return getManagedClusterFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + public com.google.cloud.dataproc.v1beta2.ManagedClusterOrBuilder getManagedClusterOrBuilder() { + if ((placementCase_ == 1) && (managedClusterBuilder_ != null)) { + return managedClusterBuilder_.getMessageOrBuilder(); + } else { + if (placementCase_ == 1) { + return (com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_; + } + return com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance(); + } + } + /** + *
+     * Optional. A cluster that is managed by the workflow.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ManagedCluster, com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder, com.google.cloud.dataproc.v1beta2.ManagedClusterOrBuilder> + getManagedClusterFieldBuilder() { + if (managedClusterBuilder_ == null) { + if (!(placementCase_ == 1)) { + placement_ = com.google.cloud.dataproc.v1beta2.ManagedCluster.getDefaultInstance(); + } + managedClusterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ManagedCluster, com.google.cloud.dataproc.v1beta2.ManagedCluster.Builder, com.google.cloud.dataproc.v1beta2.ManagedClusterOrBuilder>( + (com.google.cloud.dataproc.v1beta2.ManagedCluster) placement_, + getParentForChildren(), + isClean()); + placement_ = null; + } + placementCase_ = 1; + onChanged();; + return managedClusterBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterSelector, com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder, com.google.cloud.dataproc.v1beta2.ClusterSelectorOrBuilder> clusterSelectorBuilder_; + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
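+     * A hypothetical label-based selector ("env" and "staging" are placeholder
+     * values):
+     *
+     *   ClusterSelector selector = ClusterSelector.newBuilder()
+     *       .putClusterLabels("env", "staging")
+     *       .build();
+     *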
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public boolean hasClusterSelector() { + return placementCase_ == 2; + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public com.google.cloud.dataproc.v1beta2.ClusterSelector getClusterSelector() { + if (clusterSelectorBuilder_ == null) { + if (placementCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_; + } + return com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance(); + } else { + if (placementCase_ == 2) { + return clusterSelectorBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance(); + } + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public Builder setClusterSelector(com.google.cloud.dataproc.v1beta2.ClusterSelector value) { + if (clusterSelectorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + placement_ = value; + onChanged(); + } else { + clusterSelectorBuilder_.setMessage(value); + } + placementCase_ = 2; + return this; + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public Builder setClusterSelector( + com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder builderForValue) { + if (clusterSelectorBuilder_ == null) { + placement_ = builderForValue.build(); + onChanged(); + } else { + clusterSelectorBuilder_.setMessage(builderForValue.build()); + } + placementCase_ = 2; + return this; + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public Builder mergeClusterSelector(com.google.cloud.dataproc.v1beta2.ClusterSelector value) { + if (clusterSelectorBuilder_ == null) { + if (placementCase_ == 2 && + placement_ != com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance()) { + placement_ = com.google.cloud.dataproc.v1beta2.ClusterSelector.newBuilder((com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_) + .mergeFrom(value).buildPartial(); + } else { + placement_ = value; + } + onChanged(); + } else { + if (placementCase_ == 2) { + clusterSelectorBuilder_.mergeFrom(value); + } + clusterSelectorBuilder_.setMessage(value); + } + placementCase_ = 2; + return this; + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public Builder clearClusterSelector() { + if (clusterSelectorBuilder_ == null) { + if (placementCase_ == 2) { + placementCase_ = 0; + placement_ = null; + onChanged(); + } + } else { + if (placementCase_ == 2) { + placementCase_ = 0; + placement_ = null; + } + clusterSelectorBuilder_.clear(); + } + return this; + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder getClusterSelectorBuilder() { + return getClusterSelectorFieldBuilder().getBuilder(); + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + public com.google.cloud.dataproc.v1beta2.ClusterSelectorOrBuilder getClusterSelectorOrBuilder() { + if ((placementCase_ == 2) && (clusterSelectorBuilder_ != null)) { + return clusterSelectorBuilder_.getMessageOrBuilder(); + } else { + if (placementCase_ == 2) { + return (com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_; + } + return com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance(); + } + } + /** + *
+     * Optional. A selector that chooses the target cluster for jobs based
+     * on metadata.
+     * The selector is evaluated at the time each job is submitted.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterSelector, com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder, com.google.cloud.dataproc.v1beta2.ClusterSelectorOrBuilder> + getClusterSelectorFieldBuilder() { + if (clusterSelectorBuilder_ == null) { + if (!(placementCase_ == 2)) { + placement_ = com.google.cloud.dataproc.v1beta2.ClusterSelector.getDefaultInstance(); + } + clusterSelectorBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1beta2.ClusterSelector, com.google.cloud.dataproc.v1beta2.ClusterSelector.Builder, com.google.cloud.dataproc.v1beta2.ClusterSelectorOrBuilder>( + (com.google.cloud.dataproc.v1beta2.ClusterSelector) placement_, + getParentForChildren(), + isClean()); + placement_ = null; + } + placementCase_ = 2; + onChanged();; + return clusterSelectorBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) + private static final com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement(); + } + + public static com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WorkflowTemplatePlacement parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WorkflowTemplatePlacement(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacementOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacementOrBuilder.java new file mode 100644 index 000000000000..8f2c07a39dc9 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacementOrBuilder.java @@ -0,0 +1,67 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface WorkflowTemplatePlacementOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional. A cluster that is managed by the workflow.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + boolean hasManagedCluster(); + /** + *
+   * Optional. A cluster that is managed by the workflow.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + com.google.cloud.dataproc.v1beta2.ManagedCluster getManagedCluster(); + /** + *
+   * Optional. A cluster that is managed by the workflow.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ManagedCluster managed_cluster = 1; + */ + com.google.cloud.dataproc.v1beta2.ManagedClusterOrBuilder getManagedClusterOrBuilder(); + + /** + *
+   * Optional. A selector that chooses the target cluster for jobs based
+   * on metadata.
+   * The selector is evaluated at the time each job is submitted.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + boolean hasClusterSelector(); + /** + *
+   * Optional. A selector that chooses the target cluster for jobs based
+   * on metadata.
+   * The selector is evaluated at the time each job is submitted.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + com.google.cloud.dataproc.v1beta2.ClusterSelector getClusterSelector(); + /** + *
+   * Optional. A selector that chooses the target cluster for jobs based
+   * on metadata.
+   * The selector is evaluated at the time each job is submitted.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.ClusterSelector cluster_selector = 2; + */ + com.google.cloud.dataproc.v1beta2.ClusterSelectorOrBuilder getClusterSelectorOrBuilder(); + + public com.google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.PlacementCase getPlacementCase(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatesProto.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatesProto.java new file mode 100644 index 000000000000..ffe4b96b2118 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatesProto.java @@ -0,0 +1,449 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package com.google.cloud.dataproc.v1beta2; + +public final class WorkflowTemplatesProto { + private WorkflowTemplatesProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_LabelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_LabelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_ClusterLabelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_ClusterLabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_cloud_dataproc_v1beta2_OrderedJob_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_LabelsEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_ParametersEntry_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_ParametersEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_descriptor; + static final + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n6google/cloud/dataproc/v1beta2/workflow" + + "_templates.proto\022\035google.cloud.dataproc." + + "v1beta2\032\034google/api/annotations.proto\032,g" + + "oogle/cloud/dataproc/v1beta2/clusters.pr" + + "oto\032(google/cloud/dataproc/v1beta2/jobs." + + "proto\032#google/longrunning/operations.pro" + + "to\032\033google/protobuf/empty.proto\032\037google/" + + "protobuf/timestamp.proto\"\241\003\n\020WorkflowTem" + + "plate\022\n\n\002id\030\002 \001(\t\022\014\n\004name\030\001 \001(\t\022\017\n\007versi" + + "on\030\003 \001(\005\022/\n\013create_time\030\004 \001(\0132\032.google.p" + + "rotobuf.Timestamp\022/\n\013update_time\030\005 \001(\0132\032" + + ".google.protobuf.Timestamp\022K\n\006labels\030\006 \003" + + "(\0132;.google.cloud.dataproc.v1beta2.Workf" + + "lowTemplate.LabelsEntry\022K\n\tplacement\030\007 \001" + + "(\01328.google.cloud.dataproc.v1beta2.Workf" + + "lowTemplatePlacement\0227\n\004jobs\030\010 \003(\0132).goo" + + "gle.cloud.dataproc.v1beta2.OrderedJob\032-\n" + + "\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t" + + ":\0028\001\"\276\001\n\031WorkflowTemplatePlacement\022H\n\017ma" + + "naged_cluster\030\001 \001(\0132-.google.cloud.datap" + + "roc.v1beta2.ManagedClusterH\000\022J\n\020cluster_" + + "selector\030\002 \001(\0132..google.cloud.dataproc.v" + + "1beta2.ClusterSelectorH\000B\013\n\tplacement\"\336\001" + + "\n\016ManagedCluster\022\024\n\014cluster_name\030\002 \001(\t\022<" + + "\n\006config\030\003 \001(\0132,.google.cloud.dataproc.v" + + "1beta2.ClusterConfig\022I\n\006labels\030\004 \003(\01329.g" + + "oogle.cloud.dataproc.v1beta2.ManagedClus" + + "ter.LabelsEntry\032-\n\013LabelsEntry\022\013\n\003key\030\001 " + + "\001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\260\001\n\017ClusterSelect" + + "or\022\014\n\004zone\030\001 \001(\t\022Y\n\016cluster_labels\030\002 \003(\013" + + "2A.google.cloud.dataproc.v1beta2.Cluster" + + "Selector.ClusterLabelsEntry\0324\n\022ClusterLa" + + "belsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028" + + "\001\"\373\004\n\nOrderedJob\022\017\n\007step_id\030\001 \001(\t\022>\n\nhad" + + "oop_job\030\002 
\001(\0132(.google.cloud.dataproc.v1" + + "beta2.HadoopJobH\000\022<\n\tspark_job\030\003 \001(\0132\'.g" + + "oogle.cloud.dataproc.v1beta2.SparkJobH\000\022" + + "@\n\013pyspark_job\030\004 \001(\0132).google.cloud.data" + + "proc.v1beta2.PySparkJobH\000\022:\n\010hive_job\030\005 " + + "\001(\0132&.google.cloud.dataproc.v1beta2.Hive" + + "JobH\000\0228\n\007pig_job\030\006 \001(\0132%.google.cloud.da" + + "taproc.v1beta2.PigJobH\000\022C\n\rspark_sql_job" + + "\030\007 \001(\0132*.google.cloud.dataproc.v1beta2.S" + + "parkSqlJobH\000\022E\n\006labels\030\010 \003(\01325.google.cl" + + "oud.dataproc.v1beta2.OrderedJob.LabelsEn" + + "try\022@\n\nscheduling\030\t \001(\0132,.google.cloud.d" + + "ataproc.v1beta2.JobScheduling\022\035\n\025prerequ" + + "isite_step_ids\030\n \003(\t\032-\n\013LabelsEntry\022\013\n\003k" + + "ey\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\n\n\010job_type\"" + + "\242\004\n\020WorkflowMetadata\022\020\n\010template\030\001 \001(\t\022\017" + + "\n\007version\030\002 \001(\005\022G\n\016create_cluster\030\003 \001(\0132" + + "/.google.cloud.dataproc.v1beta2.ClusterO" + + "peration\022;\n\005graph\030\004 \001(\0132,.google.cloud.d" + + "ataproc.v1beta2.WorkflowGraph\022G\n\016delete_" + + "cluster\030\005 \001(\0132/.google.cloud.dataproc.v1" + + "beta2.ClusterOperation\022D\n\005state\030\006 \001(\01625." + + "google.cloud.dataproc.v1beta2.WorkflowMe" + + "tadata.State\022\024\n\014cluster_name\030\007 \001(\t\022S\n\npa" + + "rameters\030\010 \003(\0132?.google.cloud.dataproc.v" + + "1beta2.WorkflowMetadata.ParametersEntry\032" + + "1\n\017ParametersEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value" + + "\030\002 \001(\t:\0028\001\"8\n\005State\022\013\n\007UNKNOWN\020\000\022\013\n\007PEND" + + "ING\020\001\022\013\n\007RUNNING\020\002\022\010\n\004DONE\020\003\"E\n\020ClusterO" + + "peration\022\024\n\014operation_id\030\001 \001(\t\022\r\n\005error\030" + + "\002 \001(\t\022\014\n\004done\030\003 \001(\010\"K\n\rWorkflowGraph\022:\n\005" + + "nodes\030\001 \003(\0132+.google.cloud.dataproc.v1be" + + "ta2.WorkflowNode\"\220\002\n\014WorkflowNode\022\017\n\007ste" + + "p_id\030\001 \001(\t\022\035\n\025prerequisite_step_ids\030\002 \003(" + + "\t\022\016\n\006job_id\030\003 \001(\t\022D\n\005state\030\005 \001(\01625.googl" + + "e.cloud.dataproc.v1beta2.WorkflowNode.No" + + "deState\022\r\n\005error\030\006 \001(\t\"k\n\tNodeState\022\033\n\027N" + + "ODE_STATUS_UNSPECIFIED\020\000\022\013\n\007BLOCKED\020\001\022\014\n" + + "\010RUNNABLE\020\002\022\013\n\007RUNNING\020\003\022\r\n\tCOMPLETED\020\004\022" + + "\n\n\006FAILED\020\005\"r\n\035CreateWorkflowTemplateReq" + + "uest\022\016\n\006parent\030\001 \001(\t\022A\n\010template\030\002 \001(\0132/" + + ".google.cloud.dataproc.v1beta2.WorkflowT" + + "emplate\";\n\032GetWorkflowTemplateRequest\022\014\n" + + "\004name\030\001 \001(\t\022\017\n\007version\030\002 \001(\005\"X\n\"Instanti" + + "ateWorkflowTemplateRequest\022\014\n\004name\030\001 \001(\t" + + "\022\017\n\007version\030\002 \001(\005\022\023\n\013instance_id\030\003 \001(\t\"\222" + + "\001\n(InstantiateInlineWorkflowTemplateRequ" + + "est\022\016\n\006parent\030\001 \001(\t\022A\n\010template\030\002 \001(\0132/." 
+ + "google.cloud.dataproc.v1beta2.WorkflowTe" + + "mplate\022\023\n\013instance_id\030\003 \001(\t\"b\n\035UpdateWor" + + "kflowTemplateRequest\022A\n\010template\030\001 \001(\0132/" + + ".google.cloud.dataproc.v1beta2.WorkflowT" + + "emplate\"U\n\034ListWorkflowTemplatesRequest\022" + + "\016\n\006parent\030\001 \001(\t\022\021\n\tpage_size\030\002 \001(\005\022\022\n\npa" + + "ge_token\030\003 \001(\t\"|\n\035ListWorkflowTemplatesR" + + "esponse\022B\n\ttemplates\030\001 \003(\0132/.google.clou" + + "d.dataproc.v1beta2.WorkflowTemplate\022\027\n\017n" + + "ext_page_token\030\002 \001(\t\">\n\035DeleteWorkflowTe" + + "mplateRequest\022\014\n\004name\030\001 \001(\t\022\017\n\007version\030\002" + + " \001(\0052\337\017\n\027WorkflowTemplateService\022\235\002\n\026Cre" + + "ateWorkflowTemplate\022<.google.cloud.datap" + + "roc.v1beta2.CreateWorkflowTemplateReques" + + "t\032/.google.cloud.dataproc.v1beta2.Workfl" + + "owTemplate\"\223\001\202\323\344\223\002\214\001\"8/v1beta2/{parent=p" + + "rojects/*/regions/*}/workflowTemplates:\010" + + "templateZF\":/v1beta2/{parent=projects/*/" + + "locations/*}/workflowTemplates:\010template" + + "\022\201\002\n\023GetWorkflowTemplate\0229.google.cloud." + + "dataproc.v1beta2.GetWorkflowTemplateRequ" + + "est\032/.google.cloud.dataproc.v1beta2.Work" + + "flowTemplate\"~\202\323\344\223\002x\0228/v1beta2/{name=pro" + + "jects/*/regions/*/workflowTemplates/*}Z<" + + "\022:/v1beta2/{name=projects/*/locations/*/" + + "workflowTemplates/*}\022\237\002\n\033InstantiateWork" + + "flowTemplate\022A.google.cloud.dataproc.v1b" + + "eta2.InstantiateWorkflowTemplateRequest\032" + + "\035.google.longrunning.Operation\"\235\001\202\323\344\223\002\226\001" + + "\"D/v1beta2/{name=projects/*/regions/*/wo" + + "rkflowTemplates/*}:instantiate:\001*ZK\"F/v1" + + "beta2/{name=projects/*/locations/*/workf" + + "lowTemplates/*}:instantiate:\001*\022\305\002\n!Insta" + + "ntiateInlineWorkflowTemplate\022G.google.cl" + + "oud.dataproc.v1beta2.InstantiateInlineWo" + + "rkflowTemplateRequest\032\035.google.longrunni" + + "ng.Operation\"\267\001\202\323\344\223\002\260\001\"J/v1beta2/{parent" + + "=projects/*/regions/*}/workflowTemplates" + + ":instantiateInline:\010templateZX\"L/v1beta2" + + "/{parent=projects/*/locations/*}/workflo" + + "wTemplates:instantiateInline:\010template\022\257" + + "\002\n\026UpdateWorkflowTemplate\022<.google.cloud" + + ".dataproc.v1beta2.UpdateWorkflowTemplate" + + "Request\032/.google.cloud.dataproc.v1beta2." + + "WorkflowTemplate\"\245\001\202\323\344\223\002\236\001\032A/v1beta2/{te" + + "mplate.name=projects/*/regions/*/workflo" + + "wTemplates/*}:\010templateZO\032C/v1beta2/{tem" + + "plate.name=projects/*/locations/*/workfl" + + "owTemplates/*}:\010template\022\222\002\n\025ListWorkflo" + + "wTemplates\022;.google.cloud.dataproc.v1bet" + + "a2.ListWorkflowTemplatesRequest\032<.google" + + ".cloud.dataproc.v1beta2.ListWorkflowTemp" + + "latesResponse\"~\202\323\344\223\002x\0228/v1beta2/{parent=" + + "projects/*/regions/*}/workflowTemplatesZ" + + "<\022:/v1beta2/{parent=projects/*/locations" + + "/*}/workflowTemplates\022\356\001\n\026DeleteWorkflow" + + "Template\022<.google.cloud.dataproc.v1beta2" + + ".DeleteWorkflowTemplateRequest\032\026.google." 
+ + "protobuf.Empty\"~\202\323\344\223\002x*8/v1beta2/{name=p" + + "rojects/*/regions/*/workflowTemplates/*}" + + "Z<*:/v1beta2/{name=projects/*/locations/" + + "*/workflowTemplates/*}B\204\001\n!com.google.cl" + + "oud.dataproc.v1beta2B\026WorkflowTemplatesP" + + "rotoP\001ZEgoogle.golang.org/genproto/googl" + + "eapis/cloud/dataproc/v1beta2;dataprocb\006p" + + "roto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.cloud.dataproc.v1beta2.ClustersProto.getDescriptor(), + com.google.cloud.dataproc.v1beta2.JobsProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }, assigner); + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_descriptor, + new java.lang.String[] { "Id", "Name", "Version", "CreateTime", "UpdateTime", "Labels", "Placement", "Jobs", }); + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_LabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_LabelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplate_LabelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_WorkflowTemplatePlacement_descriptor, + new java.lang.String[] { "ManagedCluster", "ClusterSelector", "Placement", }); + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_descriptor, + new java.lang.String[] { "ClusterName", "Config", "Labels", }); + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_LabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_LabelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ManagedCluster_LabelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + 
internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_descriptor, + new java.lang.String[] { "Zone", "ClusterLabels", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_ClusterLabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_ClusterLabelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterSelector_ClusterLabelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_descriptor, + new java.lang.String[] { "StepId", "HadoopJob", "SparkJob", "PysparkJob", "HiveJob", "PigJob", "SparkSqlJob", "Labels", "Scheduling", "PrerequisiteStepIds", "JobType", }); + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_LabelsEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_LabelsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_OrderedJob_LabelsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_descriptor, + new java.lang.String[] { "Template", "Version", "CreateCluster", "Graph", "DeleteCluster", "State", "ClusterName", "Parameters", }); + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_ParametersEntry_descriptor = + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_ParametersEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_WorkflowMetadata_ParametersEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ClusterOperation_descriptor, + new java.lang.String[] { "OperationId", "Error", "Done", }); + internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_dataproc_v1beta2_WorkflowGraph_descriptor, + new java.lang.String[] { "Nodes", }); + internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_WorkflowNode_descriptor, + new java.lang.String[] { "StepId", "PrerequisiteStepIds", "JobId", "State", "Error", }); + internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_CreateWorkflowTemplateRequest_descriptor, + new java.lang.String[] { "Parent", "Template", }); + internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_GetWorkflowTemplateRequest_descriptor, + new java.lang.String[] { "Name", "Version", }); + internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_InstantiateWorkflowTemplateRequest_descriptor, + new java.lang.String[] { "Name", "Version", "InstanceId", }); + internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_InstantiateInlineWorkflowTemplateRequest_descriptor, + new java.lang.String[] { "Parent", "Template", "InstanceId", }); + internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_UpdateWorkflowTemplateRequest_descriptor, + new java.lang.String[] { "Template", }); + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesRequest_descriptor, + new java.lang.String[] { "Parent", "PageSize", "PageToken", }); + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_ListWorkflowTemplatesResponse_descriptor, + new java.lang.String[] { "Templates", "NextPageToken", }); + internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1beta2_DeleteWorkflowTemplateRequest_descriptor, + new java.lang.String[] { "Name", "Version", }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.AnnotationsProto.http); + com.google.protobuf.Descriptors.FileDescriptor + .internalUpdateFileDescriptor(descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.cloud.dataproc.v1beta2.ClustersProto.getDescriptor(); + com.google.cloud.dataproc.v1beta2.JobsProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplication.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplication.java new file mode 100644 index 000000000000..b9f1dd074e95 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplication.java @@ -0,0 +1,1187 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +/** + *
+ * A YARN application created by a job. Application information is a subset of
+ * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
+ * **Beta Feature**: This report is available for testing purposes only. It may
+ * be changed before final release.
+ * 
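+ * A read-side sketch (the {@code job} variable is hypothetical and assumes the
+ * parent Job message's repeated {@code yarn_applications} field):
+ *
+ *   for (YarnApplication app : job.getYarnApplicationsList()) {
+ *     System.out.println(
+ *         app.getName() + ": " + app.getState() + " (" + app.getProgress() + "%)");
+ *   }
+ *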
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.YarnApplication} + */ +public final class YarnApplication extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.YarnApplication) + YarnApplicationOrBuilder { +private static final long serialVersionUID = 0L; + // Use YarnApplication.newBuilder() to construct. + private YarnApplication(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private YarnApplication() { + name_ = ""; + state_ = 0; + progress_ = 0F; + trackingUrl_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private YarnApplication( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: { + int rawValue = input.readEnum(); + + state_ = rawValue; + break; + } + case 29: { + + progress_ = input.readFloat(); + break; + } + case 34: { + java.lang.String s = input.readStringRequireUtf8(); + + trackingUrl_ = s; + break; + } + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_YarnApplication_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_YarnApplication_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.YarnApplication.class, com.google.cloud.dataproc.v1beta2.YarnApplication.Builder.class); + } + + /** + *
+   * The application state, corresponding to
+   * <code>YarnProtos.YarnApplicationStateProto</code>.
+   * 
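+   * In YARN terms, FINISHED, FAILED, and KILLED are terminal states; the
+   * remaining states are transitional.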
+ * + * Protobuf enum {@code google.cloud.dataproc.v1beta2.YarnApplication.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+     * Status is unspecified.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + *
+     * Status is NEW.
+     * 
+ * + * NEW = 1; + */ + NEW(1), + /** + *
+     * Status is NEW_SAVING.
+     * 
+ * + * NEW_SAVING = 2; + */ + NEW_SAVING(2), + /** + *
+     * Status is SUBMITTED.
+     * 
+ * + * SUBMITTED = 3; + */ + SUBMITTED(3), + /** + *
+     * Status is ACCEPTED.
+     * 
+ * + * ACCEPTED = 4; + */ + ACCEPTED(4), + /** + *
+     * Status is RUNNING.
+     * 
+ * + * RUNNING = 5; + */ + RUNNING(5), + /** + *
+     * Status is FINISHED.
+     * 
+ * + * FINISHED = 6; + */ + FINISHED(6), + /** + *
+     * Status is FAILED.
+     * 
+ * + * FAILED = 7; + */ + FAILED(7), + /** + *
+     * Status is KILLED.
+     * 
+ * + * KILLED = 8; + */ + KILLED(8), + UNRECOGNIZED(-1), + ; + + /** + *
+     * Status is unspecified.
+     * 
+ * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + /** + *
+     * Status is NEW.
+     * 
+ * + * NEW = 1; + */ + public static final int NEW_VALUE = 1; + /** + *
+     * Status is NEW_SAVING.
+     * 
+ * + * NEW_SAVING = 2; + */ + public static final int NEW_SAVING_VALUE = 2; + /** + *
+     * Status is SUBMITTED.
+     * 
+ * + * SUBMITTED = 3; + */ + public static final int SUBMITTED_VALUE = 3; + /** + *
+     * Status is ACCEPTED.
+     * 
+ * + * ACCEPTED = 4; + */ + public static final int ACCEPTED_VALUE = 4; + /** + *
+     * Status is RUNNING.
+     * 
+ * + * RUNNING = 5; + */ + public static final int RUNNING_VALUE = 5; + /** + *
+     * Status is FINISHED.
+     * 
+ * + * FINISHED = 6; + */ + public static final int FINISHED_VALUE = 6; + /** + *
+     * Status is FAILED.
+     * 
+ * + * FAILED = 7; + */ + public static final int FAILED_VALUE = 7; + /** + *
+     * Status is KILLED.
+     * 
+ * + * KILLED = 8; + */ + public static final int KILLED_VALUE = 8; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + public static State forNumber(int value) { + switch (value) { + case 0: return STATE_UNSPECIFIED; + case 1: return NEW; + case 2: return NEW_SAVING; + case 3: return SUBMITTED; + case 4: return ACCEPTED; + case 5: return RUNNING; + case 6: return FINISHED; + case 7: return FAILED; + case 8: return KILLED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + State> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.YarnApplication.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1beta2.YarnApplication.State) + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + *
+   * Required. The application name.
+   * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
+   * Required. The application name.
+   * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_FIELD_NUMBER = 2; + private int state_; + /** + *
+   * Required. The application state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + public int getStateValue() { + return state_; + } + /** + *
+   * Required. The application state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplication.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.YarnApplication.State result = com.google.cloud.dataproc.v1beta2.YarnApplication.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.YarnApplication.State.UNRECOGNIZED : result; + } + + public static final int PROGRESS_FIELD_NUMBER = 3; + private float progress_; + /** + *
+   * Required. The numerical progress of the application, from 1 to 100.
+   * 
+ * + * float progress = 3; + */ + public float getProgress() { + return progress_; + } + + public static final int TRACKING_URL_FIELD_NUMBER = 4; + private volatile java.lang.Object trackingUrl_; + /** + *
+   * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+   * TimelineServer that provides application-specific information. The URL uses
+   * the internal hostname, and requires a proxy server for resolution and,
+   * possibly, access.
+   * 
+ * + * string tracking_url = 4; + */ + public java.lang.String getTrackingUrl() { + java.lang.Object ref = trackingUrl_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + trackingUrl_ = s; + return s; + } + } + /** + *
+   * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+   * TimelineServer that provides application-specific information. The URL uses
+   * the internal hostname, and requires a proxy server for resolution and,
+   * possibly, access.
+   * 
+ * + * string tracking_url = 4; + */ + public com.google.protobuf.ByteString + getTrackingUrlBytes() { + java.lang.Object ref = trackingUrl_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + trackingUrl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (state_ != com.google.cloud.dataproc.v1beta2.YarnApplication.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(2, state_); + } + if (progress_ != 0F) { + output.writeFloat(3, progress_); + } + if (!getTrackingUrlBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, trackingUrl_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (state_ != com.google.cloud.dataproc.v1beta2.YarnApplication.State.STATE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, state_); + } + if (progress_ != 0F) { + size += com.google.protobuf.CodedOutputStream + .computeFloatSize(3, progress_); + } + if (!getTrackingUrlBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, trackingUrl_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1beta2.YarnApplication)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1beta2.YarnApplication other = (com.google.cloud.dataproc.v1beta2.YarnApplication) obj; + + boolean result = true; + result = result && getName() + .equals(other.getName()); + result = result && state_ == other.state_; + result = result && ( + java.lang.Float.floatToIntBits(getProgress()) + == java.lang.Float.floatToIntBits( + other.getProgress())); + result = result && getTrackingUrl() + .equals(other.getTrackingUrl()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + java.lang.Float.floatToIntBits( + getProgress()); + hash = (37 * hash) + TRACKING_URL_FIELD_NUMBER; + hash = (53 * hash) + getTrackingUrl().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + 
java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.dataproc.v1beta2.YarnApplication parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.YarnApplication prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * A YARN application created by a job. Application information is a subset of
+   * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
+   * **Beta Feature**: This report is available for testing purposes only. It may
+   * be changed before final release.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.YarnApplication} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1beta2.YarnApplication) + com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_YarnApplication_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_YarnApplication_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1beta2.YarnApplication.class, com.google.cloud.dataproc.v1beta2.YarnApplication.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1beta2.YarnApplication.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + state_ = 0; + + progress_ = 0F; + + trackingUrl_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_YarnApplication_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.YarnApplication getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1beta2.YarnApplication.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.YarnApplication build() { + com.google.cloud.dataproc.v1beta2.YarnApplication result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.YarnApplication buildPartial() { + com.google.cloud.dataproc.v1beta2.YarnApplication result = new com.google.cloud.dataproc.v1beta2.YarnApplication(this); + result.name_ = name_; + result.state_ = state_; + result.progress_ = progress_; + result.trackingUrl_ = trackingUrl_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1beta2.YarnApplication) { + return mergeFrom((com.google.cloud.dataproc.v1beta2.YarnApplication)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1beta2.YarnApplication other) { + if (other == com.google.cloud.dataproc.v1beta2.YarnApplication.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (other.getProgress() != 0F) { + setProgress(other.getProgress()); + } + if (!other.getTrackingUrl().isEmpty()) { + trackingUrl_ = other.trackingUrl_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1beta2.YarnApplication parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1beta2.YarnApplication) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + *
+     * Required. The application name.
+     * 
+ * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Required. The application name.
+     * 
+ * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Required. The application name.
+     * 
+ * + * string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The application name.
+     * 
+ * + * string name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
+     * Required. The application name.
+     * 
+ * + * string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int state_ = 0; + /** + *
+     * Required. The application state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + public int getStateValue() { + return state_; + } + /** + *
+     * Required. The application state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + public Builder setStateValue(int value) { + state_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The application state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + public com.google.cloud.dataproc.v1beta2.YarnApplication.State getState() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1beta2.YarnApplication.State result = com.google.cloud.dataproc.v1beta2.YarnApplication.State.valueOf(state_); + return result == null ? com.google.cloud.dataproc.v1beta2.YarnApplication.State.UNRECOGNIZED : result; + } + /** + *
+     * Required. The application state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + public Builder setState(com.google.cloud.dataproc.v1beta2.YarnApplication.State value) { + if (value == null) { + throw new NullPointerException(); + } + + state_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Required. The application state.
+     * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + public Builder clearState() { + + state_ = 0; + onChanged(); + return this; + } + + private float progress_ ; + /** + *
+     * Required. The numerical progress of the application, from 1 to 100.
+     * 
+ * + * float progress = 3; + */ + public float getProgress() { + return progress_; + } + /** + *
+     * Required. The numerical progress of the application, from 1 to 100.
+     * 
+ * + * float progress = 3; + */ + public Builder setProgress(float value) { + + progress_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The numerical progress of the application, from 1 to 100.
+     * 
+ * + * float progress = 3; + */ + public Builder clearProgress() { + + progress_ = 0F; + onChanged(); + return this; + } + + private java.lang.Object trackingUrl_ = ""; + /** + *
+     * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+     * TimelineServer that provides application-specific information. The URL uses
+     * the internal hostname, and requires a proxy server for resolution and,
+     * possibly, access.
+     * 
+ * + * string tracking_url = 4; + */ + public java.lang.String getTrackingUrl() { + java.lang.Object ref = trackingUrl_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + trackingUrl_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+     * TimelineServer that provides application-specific information. The URL uses
+     * the internal hostname, and requires a proxy server for resolution and,
+     * possibly, access.
+     * 
+ * + * string tracking_url = 4; + */ + public com.google.protobuf.ByteString + getTrackingUrlBytes() { + java.lang.Object ref = trackingUrl_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + trackingUrl_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+     * TimelineServer that provides application-specific information. The URL uses
+     * the internal hostname, and requires a proxy server for resolution and,
+     * possibly, access.
+     * 
+ * + * string tracking_url = 4; + */ + public Builder setTrackingUrl( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + trackingUrl_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+     * TimelineServer that provides application-specific information. The URL uses
+     * the internal hostname, and requires a proxy server for resolution and,
+     * possibly, access.
+     * 
+ * + * string tracking_url = 4; + */ + public Builder clearTrackingUrl() { + + trackingUrl_ = getDefaultInstance().getTrackingUrl(); + onChanged(); + return this; + } + /** + *
+     * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+     * TimelineServer that provides application-specific information. The URL uses
+     * the internal hostname, and requires a proxy server for resolution and,
+     * possibly, access.
+     * 
+ * + * string tracking_url = 4; + */ + public Builder setTrackingUrlBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + trackingUrl_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.YarnApplication) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.YarnApplication) + private static final com.google.cloud.dataproc.v1beta2.YarnApplication DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.YarnApplication(); + } + + public static com.google.cloud.dataproc.v1beta2.YarnApplication getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public YarnApplication parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new YarnApplication(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1beta2.YarnApplication getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplicationOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplicationOrBuilder.java new file mode 100644 index 000000000000..3943fb728e01 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplicationOrBuilder.java @@ -0,0 +1,77 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package com.google.cloud.dataproc.v1beta2; + +public interface YarnApplicationOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1beta2.YarnApplication) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The application name.
+   * 
+ * + * string name = 1; + */ + java.lang.String getName(); + /** + *
+   * Required. The application name.
+   * 
+ * + * string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
+   * Required. The application state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + int getStateValue(); + /** + *
+   * Required. The application state.
+   * 
+ * + * .google.cloud.dataproc.v1beta2.YarnApplication.State state = 2; + */ + com.google.cloud.dataproc.v1beta2.YarnApplication.State getState(); + + /** + *
+   * Required. The numerical progress of the application, from 1 to 100.
+   * 
+ * + * float progress = 3; + */ + float getProgress(); + + /** + *
+   * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+   * TimelineServer that provides application-specific information. The URL uses
+   * the internal hostname, and requires a proxy server for resolution and,
+   * possibly, access.
+   * 
+ * + * string tracking_url = 4; + */ + java.lang.String getTrackingUrl(); + /** + *
+   * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
+   * TimelineServer that provides application-specific information. The URL uses
+   * the internal hostname, and requires a proxy server for resolution and,
+   * possibly, access.
+   * 
+ * + * string tracking_url = 4; + */ + com.google.protobuf.ByteString + getTrackingUrlBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto new file mode 100644 index 000000000000..0fb03e3f751b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto @@ -0,0 +1,712 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1beta2; + +import "google/api/annotations.proto"; +import "google/cloud/dataproc/v1beta2/shared.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "ClustersProto"; +option java_package = "com.google.cloud.dataproc.v1beta2"; + + +// The ClusterControllerService provides methods to manage clusters +// of Compute Engine instances. +service ClusterController { + // Creates a cluster in a project. + rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/projects/{project_id}/regions/{region}/clusters" + body: "cluster" + }; + } + + // Updates a cluster in a project. + rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}" + body: "cluster" + }; + } + + // Deletes a cluster in a project. + rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + delete: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}" + }; + } + + // Gets the resource representation for a cluster in a project. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { + get: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}" + }; + } + + // Lists all regions/{region}/clusters in a project. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { + get: "/v1beta2/projects/{project_id}/regions/{region}/clusters" + }; + } + + // Gets cluster diagnostic information. + // After the operation completes, the Operation.response field + // contains `DiagnoseClusterOutputLocation`. 
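+  // As an illustration (the project, region, and cluster IDs below are
+  // hypothetical), the REST form of this call, per the HTTP binding that
+  // follows, is:
+  //
+  //     POST /v1beta2/projects/my-project/regions/us-central1/clusters/my-cluster:diagnose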
+ rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" + body: "*" + }; + } +} + +// Describes the identifying information, config, and status of +// a cluster of Compute Engine instances. +message Cluster { + // Required. The Google Cloud Platform project ID that the cluster belongs to. + string project_id = 1; + + // Required. The cluster name. Cluster names within a project must be + // unique. Names of deleted clusters can be reused. + string cluster_name = 2; + + // Required. The cluster config. Note that Cloud Dataproc may set + // default values, and values may change when clusters are updated. + ClusterConfig config = 3; + + // Optional. The labels to associate with this cluster. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // No more than 32 labels can be associated with a cluster. + map labels = 8; + + // Output only. Cluster status. + ClusterStatus status = 4; + + // Output only. The previous cluster status. + repeated ClusterStatus status_history = 7; + + // Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc + // generates this value when it creates the cluster. + string cluster_uuid = 6; + + // Contains cluster daemon metrics such as HDFS and YARN stats. + // + // **Beta Feature**: This report is available for testing purposes only. It may + // be changed before final release. + ClusterMetrics metrics = 9; +} + +// The cluster config. +message ClusterConfig { + // Optional. A Cloud Storage staging bucket used for sharing generated + // SSH keys and config. If you do not specify a staging bucket, Cloud + // Dataproc will determine an appropriate Cloud Storage location (US, + // ASIA, or EU) for your cluster's staging bucket according to the Google + // Compute Engine zone where your cluster is deployed, and then it will create + // and manage this project-level, per-location bucket for you. + string config_bucket = 1; + + // Required. The shared Compute Engine config settings for + // all instances in a cluster. + GceClusterConfig gce_cluster_config = 8; + + // Optional. The Compute Engine config settings for + // the master instance in a cluster. + InstanceGroupConfig master_config = 9; + + // Optional. The Compute Engine config settings for + // worker instances in a cluster. + InstanceGroupConfig worker_config = 10; + + // Optional. The Compute Engine config settings for + // additional worker instances in a cluster. + InstanceGroupConfig secondary_worker_config = 12; + + // Optional. The config settings for software inside the cluster. + SoftwareConfig software_config = 13; + + // Optional. The config setting for auto delete cluster schedule. + LifecycleConfig lifecycle_config = 14; + + // Optional. Commands to execute on each node after config is + // completed. By default, executables are run on master and all worker nodes. + // You can test a node's role metadata to run an executable on + // a master or worker node, as shown below using `curl` (you can also use `wget`): + // + // ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + // if [[ "${ROLE}" == 'Master' ]]; then + // ... 
master specific actions ... + // else + // ... worker specific actions ... + // fi + repeated NodeInitializationAction initialization_actions = 11; +} + +// Common config settings for resources of Compute Engine cluster +// instances, applicable to all instances in the cluster. +message GceClusterConfig { + // Optional. The zone where the Compute Engine cluster will be located. + // On a create request, it is required in the "global" region. If omitted + // in a non-global Cloud Dataproc region, the service will pick a zone in the + // corresponding Compute Engine region. On a get request, zone will always be + // present. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` + // * `projects/[project_id]/zones/[zone]` + // * `us-central1-f` + string zone_uri = 1; + + // Optional. The Compute Engine network to be used for machine + // communications. Cannot be specified with subnetwork_uri. If neither + // `network_uri` nor `subnetwork_uri` is specified, the "default" network of + // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see + // [Using Subnetworks](/compute/docs/subnetworks) for more information). + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` + // * `projects/[project_id]/regions/global/default` + // * `default` + string network_uri = 2; + + // Optional. The Compute Engine subnetwork to be used for machine + // communications. Cannot be specified with network_uri. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0` + // * `projects/[project_id]/regions/us-east1/sub0` + // * `sub0` + string subnetwork_uri = 6; + + // Optional. If true, all instances in the cluster will only have internal IP + // addresses. By default, clusters are not restricted to internal IP addresses, + // and will have ephemeral external IP addresses assigned to each instance. + // This `internal_ip_only` restriction can only be enabled for subnetwork + // enabled networks, and all off-cluster dependencies must be configured to be + // accessible without external IP addresses. + bool internal_ip_only = 7; + + // Optional. The service account of the instances. Defaults to the default + // Compute Engine service account. Custom service accounts need + // permissions equivalent to the following IAM roles: + // + // * roles/logging.logWriter + // * roles/storage.objectAdmin + // + // (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + // for more information). + // Example: `[account_id]@[project_id].iam.gserviceaccount.com` + string service_account = 8; + + // Optional. The URIs of service account scopes to be included in + // Compute Engine instances. 
The following base set of scopes is always + // included: + // + // * https://www.googleapis.com/auth/cloud.useraccounts.readonly + // * https://www.googleapis.com/auth/devstorage.read_write + // * https://www.googleapis.com/auth/logging.write + // + // If no scopes are specified, the following defaults are also provided: + // + // * https://www.googleapis.com/auth/bigquery + // * https://www.googleapis.com/auth/bigtable.admin.table + // * https://www.googleapis.com/auth/bigtable.data + // * https://www.googleapis.com/auth/devstorage.full_control + repeated string service_account_scopes = 3; + + // The Compute Engine tags to add to all instances (see + // [Tagging instances](/compute/docs/label-or-tag-resources#tags)). + repeated string tags = 4; + + // The Compute Engine metadata entries to add to all instances (see + // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + map metadata = 5; +} + +// Optional. The config settings for Compute Engine resources in +// an instance group, such as a master or worker group. +message InstanceGroupConfig { + // Optional. The number of VM instances in the instance group. + // For master instance groups, must be set to 1. + int32 num_instances = 1; + + // Output only. The list of instance names. Cloud Dataproc derives the names + // from `cluster_name`, `num_instances`, and the instance group. + repeated string instance_names = 2; + + // Output only. The Compute Engine image resource used for cluster + // instances. Inferred from `SoftwareConfig.image_version`. + string image_uri = 3; + + // Optional. The Compute Engine machine type used for cluster instances. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `n1-standard-2` + // + // **Auto Zone Exception**: If you are using the Cloud Dataproc + // [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // feature, you must use the short name of the machine type + // resource, for example, `n1-standard-2`. + string machine_type_uri = 4; + + // Optional. Disk option config settings. + DiskConfig disk_config = 5; + + // Optional. Specifies that this instance group contains preemptible instances. + bool is_preemptible = 6; + + // Output only. The config for Compute Engine Instance Group + // Manager that manages this group. + // This is only used for preemptible instance groups. + ManagedGroupConfig managed_group_config = 7; + + // Optional. The Compute Engine accelerator configuration for these + // instances. + // + // **Beta Feature**: This feature is still under development. It may be + // changed before final release. + repeated AcceleratorConfig accelerators = 8; + + // Optional. Specifies the minimum cpu platform for the Instance Group. + // See [Cloud Dataproc→Minimum CPU Platform] + // (/dataproc/docs/concepts/compute/dataproc-min-cpu). + string min_cpu_platform = 9; +} + +// Specifies the resources used to actively manage an instance group. +message ManagedGroupConfig { + // Output only. The name of the Instance Template used for the Managed + // Instance Group. + string instance_template_name = 1; + + // Output only. The name of the Instance Group Manager for this group. 
+ string instance_group_manager_name = 2; +} + +// Specifies the type and number of accelerator cards attached to the instances +// of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)). +message AcceleratorConfig { + // Full URL, partial URI, or short name of the accelerator type resource to + // expose to this instance. See [Compute Engine AcceleratorTypes]( + // /compute/docs/reference/beta/acceleratorTypes) + // + // Examples + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `nvidia-tesla-k80` + // + // **Auto Zone Exception**: If you are using the Cloud Dataproc + // [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // feature, you must use the short name of the accelerator type + // resource, for example, `nvidia-tesla-k80`. + string accelerator_type_uri = 1; + + // The number of the accelerator cards of this type exposed to this instance. + int32 accelerator_count = 2; +} + +// Specifies the config of disk options for a group of VM instances. +message DiskConfig { + // Optional. Type of the boot disk (default is "pd-standard"). + // Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or + // "pd-standard" (Persistent Disk Hard Disk Drive). + string boot_disk_type = 3; + + // Optional. Size in GB of the boot disk (default is 500GB). + int32 boot_disk_size_gb = 1; + + // Optional. Number of attached SSDs, from 0 to 4 (default is 0). + // If SSDs are not attached, the boot disk is used to store runtime logs and + // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. + // If one or more SSDs are attached, this runtime bulk + // data is spread across them, and the boot disk contains only basic + // config and installed binaries. + int32 num_local_ssds = 2; +} + +// Specifies the cluster auto delete related schedule configuration. +message LifecycleConfig { + // Optional. The longest duration that cluster would keep alive while staying + // idle; passing this threshold will cause cluster to be auto-deleted. + google.protobuf.Duration idle_delete_ttl = 1; + + // Optional. Either the exact time the cluster should be deleted at or + // the cluster maximum age. + oneof ttl { + // Optional. The time when cluster will be auto-deleted. + google.protobuf.Timestamp auto_delete_time = 2; + + // Optional. The life duration of cluster, the cluster will be auto-deleted + // at the end of this duration. + google.protobuf.Duration auto_delete_ttl = 3; + } +} + +// Specifies an executable to run on a fully configured node and a +// timeout period for executable completion. +message NodeInitializationAction { + // Required. Cloud Storage URI of executable file. + string executable_file = 1; + + // Optional. Amount of time executable has to complete. Default is + // 10 minutes. Cluster creation fails with an explanatory error message (the + // name of the executable that caused the error and the exceeded timeout + // period) if the executable is not completed at end of the timeout period. + google.protobuf.Duration execution_timeout = 2; +} + +// The status of a cluster and its instances. +message ClusterStatus { + // The cluster state. + enum State { + // The cluster state is unknown. + UNKNOWN = 0; + + // The cluster is being created and set up. It is not ready for use. + CREATING = 1; + + // The cluster is currently running and healthy. 
It is ready for use. + RUNNING = 2; + + // The cluster encountered an error. It is not ready for use. + ERROR = 3; + + // The cluster is being deleted. It cannot be used. + DELETING = 4; + + // The cluster is being updated. It continues to accept and process jobs. + UPDATING = 5; + } + + // The cluster substate. + enum Substate { + // The cluster substate is unknown. + UNSPECIFIED = 0; + + // The cluster is known to be in an unhealthy state + // (for example, critical daemons are not running or HDFS capacity is + // exhausted). + // + // Applies to RUNNING state. + UNHEALTHY = 1; + + // The agent-reported status is out of date (may occur if + // Cloud Dataproc loses communication with Agent). + // + // Applies to RUNNING state. + STALE_STATUS = 2; + } + + // Output only. The cluster's state. + State state = 1; + + // Output only. Optional details of cluster's state. + string detail = 2; + + // Output only. Time when this state was entered. + google.protobuf.Timestamp state_start_time = 3; + + // Output only. Additional state information that includes + // status reported by the agent. + Substate substate = 4; +} + +// Specifies the selection and config of software inside the cluster. +message SoftwareConfig { + // Optional. The version of software inside the cluster. It must be one of the supported + // [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + // such as "1.2" (including a subminor version, such as "1.2.29"), or the + // ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). + // If unspecified, it defaults to the latest version. + string image_version = 1; + + // Optional. The properties to set on daemon config files. + // + // Property keys are specified in `prefix:property` format, such as + // `core:fs.defaultFS`. The following are supported prefixes + // and their mappings: + // + // * capacity-scheduler: `capacity-scheduler.xml` + // * core: `core-site.xml` + // * distcp: `distcp-default.xml` + // * hdfs: `hdfs-site.xml` + // * hive: `hive-site.xml` + // * mapred: `mapred-site.xml` + // * pig: `pig.properties` + // * spark: `spark-defaults.conf` + // * yarn: `yarn-site.xml` + // + // For more information, see + // [Cluster properties](/dataproc/docs/concepts/cluster-properties). + map properties = 2; +} + +// Contains cluster daemon metrics, such as HDFS and YARN stats. +// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +message ClusterMetrics { + // The HDFS metrics. + map hdfs_metrics = 1; + + // The YARN metrics. + map yarn_metrics = 2; +} + +// A request to create a cluster. +message CreateClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1; + + // Required. The Cloud Dataproc region in which to handle the request. + string region = 3; + + // Required. The cluster to create. + Cluster cluster = 2; + + // Optional. A unique id used to identify the request. If the server + // receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same + // id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend + // is returned. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
+ // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 4; +} + +// A request to update a cluster. +message UpdateClusterRequest { + // Required. The ID of the Google Cloud Platform project the + // cluster belongs to. + string project_id = 1; + + // Required. The Cloud Dataproc region in which to handle the request. + string region = 5; + + // Required. The cluster name. + string cluster_name = 2; + + // Required. The changes to the cluster. + Cluster cluster = 3; + + // Optional. Timeout for graceful YARN decomissioning. Graceful + // decommissioning allows removing nodes from the cluster without + // interrupting jobs in progress. Timeout specifies how long to wait for jobs + // in progress to finish before forcefully removing nodes (and potentially + // interrupting jobs). Default timeout is 0 (for forceful decommission), and + // the maximum allowed timeout is 1 day. + // + // Only supported on Dataproc image versions 1.2 and higher. + google.protobuf.Duration graceful_decommission_timeout = 6; + + // Required. Specifies the path, relative to `Cluster`, of + // the field to update. For example, to change the number of workers + // in a cluster to 5, the `update_mask` parameter would be + // specified as `config.worker_config.num_instances`, + // and the `PATCH` request body would specify the new value, as follows: + // + // { + // "config":{ + // "workerConfig":{ + // "numInstances":"5" + // } + // } + // } + // + // Similarly, to change the number of preemptible workers in a cluster to 5, the + // `update_mask` parameter would be `config.secondary_worker_config.num_instances`, + // and the `PATCH` request body would be set as follows: + // + // { + // "config":{ + // "secondaryWorkerConfig":{ + // "numInstances":"5" + // } + // } + // } + // Note: currently only the following fields can be updated: + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + //
+  //  Mask                                           Purpose
+  //  ---------------------------------------------  ----------------------------------
+  //  labels                                         Updates labels
+  //  config.worker_config.num_instances             Resize primary worker group
+  //  config.secondary_worker_config.num_instances   Resize secondary worker group
+  //  config.lifecycle_config.auto_delete_ttl        Reset MAX TTL duration
+  //  config.lifecycle_config.auto_delete_time       Update MAX TTL deletion timestamp
+  //  config.lifecycle_config.idle_delete_ttl        Update Idle TTL duration
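+  //
+  //  As an illustration (the value is hypothetical, assuming the standard
+  //  proto3 JSON mapping), updating the idle TTL would use the `update_mask`
+  //  `config.lifecycle_config.idle_delete_ttl` with a `PATCH` body such as:
+  //
+  //     {
+  //       "config":{
+  //         "lifecycleConfig":{
+  //           "idleDeleteTtl":"3600s"
+  //         }
+  //       }
+  //     }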
+ google.protobuf.FieldMask update_mask = 4; + + // Optional. A unique id used to identify the request. If the server + // receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same + // id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the + // backend is returned. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 7; +} + +// A request to delete a cluster. +message DeleteClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1; + + // Required. The Cloud Dataproc region in which to handle the request. + string region = 3; + + // Required. The cluster name. + string cluster_name = 2; + + // Optional. Specifying the `cluster_uuid` means the RPC should fail + // (with error NOT_FOUND) if cluster with specified UUID does not exist. + string cluster_uuid = 4; + + // Optional. A unique id used to identify the request. If the server + // receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same + // id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the + // backend is returned. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 5; +} + +// Request to get the resource representation for a cluster in a project. +message GetClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1; + + // Required. The Cloud Dataproc region in which to handle the request. + string region = 3; + + // Required. The cluster name. + string cluster_name = 2; +} + +// A request to list the clusters in a project. +message ListClustersRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1; + + // Required. The Cloud Dataproc region in which to handle the request. + string region = 4; + + // Optional. A filter constraining the clusters to list. Filters are + // case-sensitive and have the following syntax: + // + // field = value [AND [field = value]] ... + // + // where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, + // and `[KEY]` is a label key. **value** can be `*` to match all values. + // `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + // `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + // contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + // contains the `DELETING` and `ERROR` states. + // `clusterName` is the name of the cluster provided at creation time. + // Only the logical `AND` operator is supported; space-separated items are + // treated as having an implicit `AND` operator. 
+ // + // Example filter: + // + // status.state = ACTIVE AND clusterName = mycluster + // AND labels.env = staging AND labels.starred = * + string filter = 5; + + // Optional. The standard List page size. + int32 page_size = 2; + + // Optional. The standard List page token. + string page_token = 3; +} + +// The list of all clusters in a project. +message ListClustersResponse { + // Output only. The clusters in the project. + repeated Cluster clusters = 1; + + // Output only. This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // `page_token` in a subsequent ListClustersRequest. + string next_page_token = 2; +} + +// A request to collect cluster diagnostic information. +message DiagnoseClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1; + + // Required. The Cloud Dataproc region in which to handle the request. + string region = 3; + + // Required. The cluster name. + string cluster_name = 2; +} + +// The location of diagnostic output. +message DiagnoseClusterResults { + // Output only. The Cloud Storage URI of the diagnostic output. + // The output report is a plain text file with a summary of collected + // diagnostics. + string output_uri = 1; +} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto new file mode 100644 index 000000000000..d5635583ecc4 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto @@ -0,0 +1,767 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1beta2; + +import "google/api/annotations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "JobsProto"; +option java_package = "com.google.cloud.dataproc.v1beta2"; + + +// The JobController provides methods to manage jobs. +service JobController { + // Submits a job to a cluster. + rpc SubmitJob(SubmitJobRequest) returns (Job) { + option (google.api.http) = { + post: "/v1beta2/projects/{project_id}/regions/{region}/jobs:submit" + body: "*" + }; + } + + // Gets the resource representation for a job in a project. + rpc GetJob(GetJobRequest) returns (Job) { + option (google.api.http) = { + get: "/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}" + }; + } + + // Lists regions/{region}/jobs in a project. 
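+  // As an illustration (the project and region IDs are hypothetical), the
+  // REST form of this call, per the HTTP binding that follows, is:
+  //
+  //     GET /v1beta2/projects/my-project/regions/us-central1/jobs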
+ rpc ListJobs(ListJobsRequest) returns (ListJobsResponse) { + option (google.api.http) = { + get: "/v1beta2/projects/{project_id}/regions/{region}/jobs" + }; + } + + // Updates a job in a project. + rpc UpdateJob(UpdateJobRequest) returns (Job) { + option (google.api.http) = { + patch: "/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}" + body: "job" + }; + } + + // Starts a job cancellation request. To access the job resource + // after cancellation, call + // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or + // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). + rpc CancelJob(CancelJobRequest) returns (Job) { + option (google.api.http) = { + post: "/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel" + body: "*" + }; + } + + // Deletes the job from the project. If the job is active, the delete fails, + // and the response returns `FAILED_PRECONDITION`. + rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}" + }; + } +} + +// The runtime logging config of the job. +message LoggingConfig { + // The Log4j level for job execution. When running an + // [Apache Hive](http://hive.apache.org/) job, Cloud + // Dataproc configures the Hive client to an equivalent verbosity level. + enum Level { + // Level is unspecified. Use default level for log4j. + LEVEL_UNSPECIFIED = 0; + + // Use ALL level for log4j. + ALL = 1; + + // Use TRACE level for log4j. + TRACE = 2; + + // Use DEBUG level for log4j. + DEBUG = 3; + + // Use INFO level for log4j. + INFO = 4; + + // Use WARN level for log4j. + WARN = 5; + + // Use ERROR level for log4j. + ERROR = 6; + + // Use FATAL level for log4j. + FATAL = 7; + + // Turn off log4j. + OFF = 8; + } + + // The per-package log levels for the driver. This may include + // "root" package name to configure rootLogger. + // Examples: + // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + map driver_log_levels = 2; +} + +// A Cloud Dataproc job for running +// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) +// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). +message HadoopJob { + // Required. Indicates the location of the driver's main class. Specify + // either the jar file that contains the main class or the main class name. + // To specify both, add the jar file to `jar_file_uris`, and then specify + // the main class name in this property. + oneof driver { + // The HCFS URI of the jar file containing the main class. + // Examples: + // 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' + // 'hdfs:/tmp/test-samples/custom-wordcount.jar' + // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + string main_jar_file_uri = 1; + + // The name of the driver's main class. The jar file containing the class + // must be in the default CLASSPATH or specified in `jar_file_uris`. + string main_class = 2; + } + + // Optional. The arguments to pass to the driver. Do not + // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job + // properties, since a collision may occur that causes an incorrect job + // submission. + repeated string args = 3; + + // Optional. 
+// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
+// applications on YARN.
+message SparkJob {
+  // Required. The specification of the main method to call to drive the job.
+  // Specify either the jar file that contains the main class or the main class
+  // name. To pass both a main jar and a main class in that jar, add the jar to
+  // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
+  oneof driver {
+    // The HCFS URI of the jar file that contains the main class.
+    string main_jar_file_uri = 1;
+
+    // The name of the driver's main class. The jar file that contains the class
+    // must be in the default CLASSPATH or specified in `jar_file_uris`.
+    string main_class = 2;
+  }
+
+  // Optional. The arguments to pass to the driver. Do not include arguments,
+  // such as `--conf`, that can be set as job properties, since a collision may
+  // occur that causes an incorrect job submission.
+  repeated string args = 3;
+
+  // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+  // Spark driver and tasks.
+  repeated string jar_file_uris = 4;
+
+  // Optional. HCFS URIs of files to be copied to the working directory of
+  // Spark drivers and distributed tasks. Useful for naively parallel tasks.
+  repeated string file_uris = 5;
+
+  // Optional. HCFS URIs of archives to be extracted in the working directory
+  // of Spark drivers and tasks. Supported file types:
+  // .jar, .tar, .tar.gz, .tgz, and .zip.
+  repeated string archive_uris = 6;
+
+  // Optional. A mapping of property names to values, used to configure Spark.
+  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // overwritten. Can include properties set in
+  // /etc/spark/conf/spark-defaults.conf and classes in user code.
+  map<string, string> properties = 7;
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 8;
+}
+
+// A Cloud Dataproc job for running
+// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+// applications on YARN.
+message PySparkJob {
+  // Required. The HCFS URI of the main Python file to use as the driver. Must
+  // be a .py file.
+  string main_python_file_uri = 1;
+
+  // Optional. The arguments to pass to the driver. Do not include arguments,
+  // such as `--conf`, that can be set as job properties, since a collision may
+  // occur that causes an incorrect job submission.
+  repeated string args = 2;
+
+  // Optional. HCFS file URIs of Python files to pass to the PySpark
+  // framework. Supported file types: .py, .egg, and .zip.
+  repeated string python_file_uris = 3;
+
+  // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
+  // Python driver and tasks.
+  repeated string jar_file_uris = 4;
+
+  // Optional. HCFS URIs of files to be copied to the working directory of
+  // Python drivers and distributed tasks. Useful for naively parallel tasks.
+  repeated string file_uris = 5;
+
+  // Optional. HCFS URIs of archives to be extracted in the working directory of
+  // Python drivers and tasks. Supported file types:
+  // .jar, .tar, .tar.gz, .tgz, and .zip.
+  repeated string archive_uris = 6;
+
+  // Optional. A mapping of property names to values, used to configure PySpark.
+  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // overwritten. Can include properties set in
+  // /etc/spark/conf/spark-defaults.conf and classes in user code.
+  map<string, string> properties = 7;
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 8;
+}
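[Editor's note — illustration, not part of the patch] The map<string, string> fields above surface on the generated Java builders as put* methods. A minimal sketch of a SparkJob that sets one Spark property; the class name, jar URI, and memory value are placeholders:

    SparkJob sparkJob =
        SparkJob.newBuilder()
            .setMainClass("com.example.MySparkApp")            // must be on the CLASSPATH or in jar_file_uris
            .addJarFileUris("gs://my-bucket/my-spark-app.jar")
            .putProperties("spark.executor.memory", "4g")      // generated from map<string, string> properties
            .build();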
+// A list of queries to run on a cluster.
+message QueryList {
+  // Required. The queries to execute. You do not need to terminate a query
+  // with a semicolon. Multiple queries can be specified in one string
+  // by separating each with a semicolon. Here is an example of a Cloud
+  // Dataproc API snippet that uses a QueryList to specify a HiveJob:
+  //
+  //     "hiveJob": {
+  //       "queryList": {
+  //         "queries": [
+  //           "query1",
+  //           "query2",
+  //           "query3;query4",
+  //         ]
+  //       }
+  //     }
+  repeated string queries = 1;
+}
+
+// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
+// queries on YARN.
+message HiveJob {
+  // Required. The sequence of Hive queries to execute, specified as either
+  // an HCFS file URI or a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains Hive queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Whether to continue executing queries if a query fails.
+  // The default value is `false`. Setting to `true` can be useful when executing
+  // independent parallel queries.
+  bool continue_on_failure = 3;
+
+  // Optional. Mapping of query variable names to values (equivalent to the
+  // Hive command: `SET name="value";`).
+  map<string, string> script_variables = 4;
+
+  // Optional. A mapping of property names and values, used to configure Hive.
+  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+  // /etc/hive/conf/hive-site.xml, and classes in user code.
+  map<string, string> properties = 5;
+
+  // Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+  // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+  // and UDFs.
+  repeated string jar_file_uris = 6;
+}
+
+// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
+// queries.
+message SparkSqlJob {
+  // Required. The sequence of Spark SQL queries to execute, specified as
+  // either an HCFS file URI or as a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains SQL queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Mapping of query variable names to values (equivalent to the
+  // Spark SQL command: SET `name="value";`).
+  map<string, string> script_variables = 3;
+
+  // Optional. A mapping of property names to values, used to configure
+  // Spark SQL's SparkConf. Properties that conflict with values set by the
+  // Cloud Dataproc API may be overwritten.
+  map<string, string> properties = 4;
+
+  // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+  repeated string jar_file_uris = 56;
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 6;
+}
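[Editor's note — illustration, not part of the patch] The QueryList JSON snippet above maps one-to-one onto the generated builders. A sketch of the equivalent HiveJob; the queries and the variable value are placeholders:

    HiveJob hiveJob =
        HiveJob.newBuilder()
            .setQueryList(
                QueryList.newBuilder()
                    .addQueries("query1")
                    .addQueries("query2")
                    .addQueries("query3;query4"))  // two queries in one string, separated by ';'
            .setContinueOnFailure(true)            // keep running independent queries after a failure
            .putScriptVariables("env", "staging")  // equivalent to: SET env="staging";
            .build();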
+// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
+// queries on YARN.
+message PigJob {
+  // Required. The sequence of Pig queries to execute, specified as an HCFS
+  // file URI or a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains the Pig queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Whether to continue executing queries if a query fails.
+  // The default value is `false`. Setting to `true` can be useful when executing
+  // independent parallel queries.
+  bool continue_on_failure = 3;
+
+  // Optional. Mapping of query variable names to values (equivalent to the Pig
+  // command: `name=[value]`).
+  map<string, string> script_variables = 4;
+
+  // Optional. A mapping of property names to values, used to configure Pig.
+  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+  // /etc/pig/conf/pig.properties, and classes in user code.
+  map<string, string> properties = 5;
+
+  // Optional. HCFS URIs of jar files to add to the CLASSPATH of
+  // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+  repeated string jar_file_uris = 6;
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 7;
+}
+
+// Cloud Dataproc job config.
+message JobPlacement {
+  // Required. The name of the cluster where the job will be submitted.
+  string cluster_name = 1;
+
+  // Output only. A cluster UUID generated by the Cloud Dataproc service when
+  // the job is submitted.
+  string cluster_uuid = 2;
+}
+
+// Cloud Dataproc job status.
+message JobStatus {
+  // The job state.
+  enum State {
+    // The job state is unknown.
+    STATE_UNSPECIFIED = 0;
+
+    // The job is pending; it has been submitted, but is not yet running.
+    PENDING = 1;
+
+    // Job has been received by the service and completed initial setup;
+    // it will soon be submitted to the cluster.
+    SETUP_DONE = 8;
+
+    // The job is running on the cluster.
+    RUNNING = 2;
+
+    // A CancelJob request has been received, but is pending.
+    CANCEL_PENDING = 3;
+
+    // Transient in-flight resources have been canceled, and the request to
+    // cancel the running job has been issued to the cluster.
+    CANCEL_STARTED = 7;
+
+    // The job cancellation was successful.
+    CANCELLED = 4;
+
+    // The job has completed successfully.
+    DONE = 5;
+
+    // The job has completed, but encountered an error.
+    ERROR = 6;
+
+    // Job attempt has failed. The detail field contains failure details for
+    // this attempt.
+    //
+    // Applies to restartable jobs only.
+    ATTEMPT_FAILURE = 9;
+  }
+
+  // The job substate.
+  enum Substate {
+    // The job substate is unknown.
+    UNSPECIFIED = 0;
+
+    // The Job is submitted to the agent.
+    //
+    // Applies to RUNNING state.
+    SUBMITTED = 1;
+
+    // The Job has been received and is awaiting execution (it may be waiting
+    // for a condition to be met). See the "details" field for the reason for
+    // the delay.
+    //
+    // Applies to RUNNING state.
+    QUEUED = 2;
+
+    // The agent-reported status is out of date, which may be caused by a
+    // loss of communication between the agent and Cloud Dataproc. If the
+    // agent does not send a timely update, the job will fail.
+    //
+    // Applies to RUNNING state.
+ STALE_STATUS = 3; + } + + // Output only. A state message specifying the overall job state. + State state = 1; + + // Output only. Optional job state details, such as an error + // description if the state is ERROR. + string details = 2; + + // Output only. The time when this state was entered. + google.protobuf.Timestamp state_start_time = 6; + + // Output only. Additional state information, which includes + // status reported by the agent. + Substate substate = 7; +} + +// Encapsulates the full scoping used to reference a job. +message JobReference { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + string project_id = 1; + + // Optional. The job ID, which must be unique within the project. The job ID + // is generated by the server upon job submission or provided by the user as a + // means to perform retries without creating duplicate jobs. The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or + // hyphens (-). The maximum length is 100 characters. + string job_id = 2; +} + +// A YARN application created by a job. Application information is a subset of +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. +// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +message YarnApplication { + // The application state, corresponding to + // YarnProtos.YarnApplicationStateProto. + enum State { + // Status is unspecified. + STATE_UNSPECIFIED = 0; + + // Status is NEW. + NEW = 1; + + // Status is NEW_SAVING. + NEW_SAVING = 2; + + // Status is SUBMITTED. + SUBMITTED = 3; + + // Status is ACCEPTED. + ACCEPTED = 4; + + // Status is RUNNING. + RUNNING = 5; + + // Status is FINISHED. + FINISHED = 6; + + // Status is FAILED. + FAILED = 7; + + // Status is KILLED. + KILLED = 8; + } + + // Required. The application name. + string name = 1; + + // Required. The application state. + State state = 2; + + // Required. The numerical progress of the application, from 1 to 100. + float progress = 3; + + // Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or + // TimelineServer that provides application-specific information. The URL uses + // the internal hostname, and requires a proxy server for resolution and, + // possibly, access. + string tracking_url = 4; +} + +// A Cloud Dataproc job resource. +message Job { + // Optional. The fully qualified reference to the job, which can be used to + // obtain the equivalent REST path of the job resource. If this property + // is not specified when a job is created, the server generates a + // job_id. + JobReference reference = 1; + + // Required. Job information, including how, when, and where to + // run the job. + JobPlacement placement = 2; + + // Required. The application/framework-specific portion of the job. + oneof type_job { + // Job is a Hadoop job. + HadoopJob hadoop_job = 3; + + // Job is a Spark job. + SparkJob spark_job = 4; + + // Job is a Pyspark job. + PySparkJob pyspark_job = 5; + + // Job is a Hive job. + HiveJob hive_job = 6; + + // Job is a Pig job. + PigJob pig_job = 7; + + // Job is a SparkSql job. + SparkSqlJob spark_sql_job = 12; + } + + // Output only. The job status. Additional application-specific + // status information may be contained in the type_job + // and yarn_applications fields. + JobStatus status = 8; + + // Output only. The previous job status. + repeated JobStatus status_history = 13; + + // Output only. The collection of YARN applications spun up by this job. 
+  //
+  // **Beta** Feature: This report is available for testing purposes only. It may
+  // be changed before final release.
+  repeated YarnApplication yarn_applications = 9;
+
+  // Output only. A URI pointing to the location of the stdout of the job's
+  // driver program.
+  string driver_output_resource_uri = 17;
+
+  // Output only. If present, the location of miscellaneous control files
+  // which may be used as part of job setup and handling. If not present,
+  // control files may be placed in the same location as `driver_output_uri`.
+  string driver_control_files_uri = 15;
+
+  // Optional. The labels to associate with this job.
+  // Label **keys** must contain 1 to 63 characters, and must conform to
+  // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+  // Label **values** may be empty, but, if present, must contain 1 to 63
+  // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+  // No more than 32 labels can be associated with a job.
+  map<string, string> labels = 18;
+
+  // Optional. Job scheduling configuration.
+  JobScheduling scheduling = 20;
+}
+
+// Job scheduling options.
+message JobScheduling {
+  // Optional. Maximum number of times per hour a driver may be restarted as
+  // a result of the driver terminating with a non-zero code before the job is
+  // reported failed.
+  //
+  // A job may be reported as thrashing if the driver exits with a non-zero
+  // code 4 times within a 10-minute window.
+  //
+  // Maximum value is 10.
+  int32 max_failures_per_hour = 1;
+}
+
+// A request to submit a job.
+message SubmitJobRequest {
+  // Required. The ID of the Google Cloud Platform project that the job
+  // belongs to.
+  string project_id = 1;
+
+  // Required. The Cloud Dataproc region in which to handle the request.
+  string region = 3;
+
+  // Required. The job resource.
+  Job job = 2;
+
+  // Optional. A unique id used to identify the request. If the server
+  // receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
+  // id, then the second request will be ignored and the
+  // first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+  // is returned.
+  //
+  // It is recommended to always set this value to a
+  // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+  //
+  // The id must contain only letters (a-z, A-Z), numbers (0-9),
+  // underscores (_), and hyphens (-). The maximum length is 40 characters.
+  string request_id = 4;
+}
+
+// A request to get the resource representation for a job in a project.
+message GetJobRequest {
+  // Required. The ID of the Google Cloud Platform project that the job
+  // belongs to.
+  string project_id = 1;
+
+  // Required. The Cloud Dataproc region in which to handle the request.
+  string region = 3;
+
+  // Required. The job ID.
+  string job_id = 2;
+}
+
+// A request to list jobs in a project.
+message ListJobsRequest {
+  // A matcher that specifies categories of job states.
+  enum JobStateMatcher {
+    // Match all jobs, regardless of state.
+    ALL = 0;
+
+    // Only match jobs in non-terminal states: PENDING, RUNNING, or
+    // CANCEL_PENDING.
+    ACTIVE = 1;
+
+    // Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
+    NON_ACTIVE = 2;
+  }
+
+  // Required. The ID of the Google Cloud Platform project that the job
+  // belongs to.
+  string project_id = 1;
+
+  // Required. The Cloud Dataproc region in which to handle the request.
+  string region = 6;
+
+  // Optional. The number of results to return in each response.
+  int32 page_size = 2;
+
+  // Optional. The page token, returned by a previous call, to request the
+  // next page of results.
+  string page_token = 3;
+
+  // Optional. If set, the returned jobs list includes only jobs that were
+  // submitted to the named cluster.
+  string cluster_name = 4;
+
+  // Optional. Specifies enumerated categories of jobs to list.
+  // (default = match ALL jobs).
+  //
+  // If `filter` is provided, `jobStateMatcher` will be ignored.
+  JobStateMatcher job_state_matcher = 5;
+
+  // Optional. A filter constraining the jobs to list. Filters are
+  // case-sensitive and have the following syntax:
+  //
+  // [field = value] AND [field [= value]] ...
+  //
+  // where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
+  // key. **value** can be `*` to match all values.
+  // `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
+  // Only the logical `AND` operator is supported; space-separated items are
+  // treated as having an implicit `AND` operator.
+  //
+  // Example filter:
+  //
+  // status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+  string filter = 7;
+}
+
+// A request to update a job.
+message UpdateJobRequest {
+  // Required. The ID of the Google Cloud Platform project that the job
+  // belongs to.
+  string project_id = 1;
+
+  // Required. The Cloud Dataproc region in which to handle the request.
+  string region = 2;
+
+  // Required. The job ID.
+  string job_id = 3;
+
+  // Required. The changes to the job.
+  Job job = 4;
+
+  // Required. Specifies the path, relative to Job, of
+  // the field to update. For example, to update the labels of a Job the
+  // update_mask parameter would be specified as
+  // labels, and the `PATCH` request body would specify the new
+  // value. Note: Currently, labels is the only
+  // field that can be updated.
+  google.protobuf.FieldMask update_mask = 5;
+}
+
+// A list of jobs in a project.
+message ListJobsResponse {
+  // Output only. Jobs list.
+  repeated Job jobs = 1;
+
+  // Optional. This token is included in the response if there are more results
+  // to fetch. To fetch additional results, provide this value as the
+  // `page_token` in a subsequent ListJobsRequest.
+  string next_page_token = 2;
+}
+
+// A request to cancel a job.
+message CancelJobRequest {
+  // Required. The ID of the Google Cloud Platform project that the job
+  // belongs to.
+  string project_id = 1;
+
+  // Required. The Cloud Dataproc region in which to handle the request.
+  string region = 3;
+
+  // Required. The job ID.
+  string job_id = 2;
+}
+
+// A request to delete a job.
+message DeleteJobRequest {
+  // Required. The ID of the Google Cloud Platform project that the job
+  // belongs to.
+  string project_id = 1;
+
+  // Required. The Cloud Dataproc region in which to handle the request.
+  string region = 3;
+
+  // Required. The job ID.
+  string job_id = 2;
+}
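[Editor's note — illustration, not part of the patch] The update_mask contract in UpdateJobRequest is easiest to see in code. A sketch of a labels-only update, which per the comment above is currently the only supported mask path; the IDs and label value are placeholders, and FieldMask is com.google.protobuf.FieldMask:

    UpdateJobRequest request =
        UpdateJobRequest.newBuilder()
            .setProjectId("my-project")
            .setRegion("global")
            .setJobId("my-job-id")
            .setJob(Job.newBuilder().putLabels("env", "staging"))      // the new label value
            .setUpdateMask(FieldMask.newBuilder().addPaths("labels"))  // restrict the PATCH to labels
            .build();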
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto
new file mode 100644
index 000000000000..8c428dae2190
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto
@@ -0,0 +1,83 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.dataproc.v1beta2;
+
+import "google/api/annotations.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc";
+option java_multiple_files = true;
+option java_outer_classname = "OperationsProto";
+option java_package = "com.google.cloud.dataproc.v1beta2";
+
+
+// The status of the operation.
+message ClusterOperationStatus {
+  // The operation state.
+  enum State {
+    // Unused.
+    UNKNOWN = 0;
+
+    // The operation has been created.
+    PENDING = 1;
+
+    // The operation is running.
+    RUNNING = 2;
+
+    // The operation is done; either cancelled or completed.
+    DONE = 3;
+  }
+
+  // Output only. A message containing the operation state.
+  State state = 1;
+
+  // Output only. A message containing the detailed operation state.
+  string inner_state = 2;
+
+  // Output only. A message containing any operation metadata details.
+  string details = 3;
+
+  // Output only. The time this state was entered.
+  google.protobuf.Timestamp state_start_time = 4;
+}
+
+// Metadata describing the operation.
+message ClusterOperationMetadata {
+  // Output only. Name of the cluster for the operation.
+  string cluster_name = 7;
+
+  // Output only. Cluster UUID for the operation.
+  string cluster_uuid = 8;
+
+  // Output only. Current operation status.
+  ClusterOperationStatus status = 9;
+
+  // Output only. The previous operation status.
+  repeated ClusterOperationStatus status_history = 10;
+
+  // Output only. The operation type.
+  string operation_type = 11;
+
+  // Output only. Short description of operation.
+  string description = 12;
+
+  // Output only. Labels associated with the operation.
+  map<string, string> labels = 13;
+
+  // Output only. Errors encountered during operation execution.
+  repeated string warnings = 14;
+}
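[Editor's note — illustration, not part of the patch] ClusterOperationMetadata is the metadata type carried by the cluster long-running operations, as the OperationFuture signatures in the client further down in this patch show. A sketch of inspecting it while a create is in flight; the identifiers are placeholders, and gax's OperationFuture.getMetadata() is assumed:

    OperationFuture<Cluster, ClusterOperationMetadata> future =
        clusterControllerClient.createClusterAsync("my-project", "global", cluster);
    ClusterOperationMetadata metadata = future.getMetadata().get();  // snapshot of the operation's progress
    System.out.println(metadata.getStatus().getState());             // PENDING, RUNNING, or DONE
    Cluster created = future.get();                                  // blocks until the operation completes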
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto
new file mode 100644
index 000000000000..801708a52c87
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto
@@ -0,0 +1,25 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.dataproc.v1beta2;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc";
+option java_multiple_files = true;
+option java_outer_classname = "SharedProto";
+option java_package = "com.google.cloud.dataproc.v1beta2";
+
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto
new file mode 100644
index 000000000000..4db43168c813
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto
@@ -0,0 +1,544 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.dataproc.v1beta2;
+
+import "google/api/annotations.proto";
+import "google/cloud/dataproc/v1beta2/clusters.proto";
+import "google/cloud/dataproc/v1beta2/jobs.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc";
+option java_multiple_files = true;
+option java_outer_classname = "WorkflowTemplatesProto";
+option java_package = "com.google.cloud.dataproc.v1beta2";
+
+
+// The API interface for managing Workflow Templates in the
+// Cloud Dataproc API.
+service WorkflowTemplateService {
+  // Creates a new workflow template.
+  rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) {
+    option (google.api.http) = {
+      post: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates"
+      body: "template"
+      additional_bindings {
+        post: "/v1beta2/{parent=projects/*/locations/*}/workflowTemplates"
+        body: "template"
+      }
+    };
+  }
+
+  // Retrieves the latest workflow template.
+  //
+  // Can retrieve a previously instantiated template by specifying an optional
+  // version parameter.
+  rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) {
+    option (google.api.http) = {
+      get: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}"
+      additional_bindings {
+        get: "/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}"
+      }
+    };
+  }
+
+  // Instantiates a template and begins execution.
+  //
+  // The returned Operation can be used to track execution of the
+  // workflow by polling
+  // [operations.get][google.longrunning.Operations.GetOperation].
+  // The Operation will complete when the entire workflow is finished.
+  //
+  // The running workflow can be aborted via
+  // [operations.cancel][google.longrunning.Operations.CancelOperation].
+  // This will cause any inflight jobs to be cancelled and workflow-owned
+  // clusters to be deleted.
+  //
+  // The [Operation.metadata][google.longrunning.Operation.metadata] will be
+  // [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+  //
+  // On successful completion,
+  // [Operation.response][google.longrunning.Operation.response] will be
+  // [Empty][google.protobuf.Empty].
+  rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}:instantiate"
+      body: "*"
+      additional_bindings {
+        post: "/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}:instantiate"
+        body: "*"
+      }
+    };
+  }
+
+  // Instantiates a template and begins execution.
+  //
+  // This method is equivalent to executing the sequence
+  // [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+  // [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+  //
+  // The returned Operation can be used to track execution of the
+  // workflow by polling
+  // [operations.get][google.longrunning.Operations.GetOperation].
+  // The Operation will complete when the entire workflow is finished.
+  //
+  // The running workflow can be aborted via
+  // [operations.cancel][google.longrunning.Operations.CancelOperation].
+  // This will cause any inflight jobs to be cancelled and workflow-owned
+  // clusters to be deleted.
+  //
+  // The [Operation.metadata][google.longrunning.Operation.metadata] will be
+  // [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+  //
+  // On successful completion,
+  // [Operation.response][google.longrunning.Operation.response] will be
+  // [Empty][google.protobuf.Empty].
+  rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates:instantiateInline"
+      body: "template"
+      additional_bindings {
+        post: "/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline"
+        body: "template"
+      }
+    };
+  }
+
+  // Updates (replaces) a workflow template. The updated template
+  // must contain a version that matches the current server version.
+  rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) {
+    option (google.api.http) = {
+      put: "/v1beta2/{template.name=projects/*/regions/*/workflowTemplates/*}"
+      body: "template"
+      additional_bindings {
+        put: "/v1beta2/{template.name=projects/*/locations/*/workflowTemplates/*}"
+        body: "template"
+      }
+    };
+  }
+
+  // Lists workflows that match the specified filter in the request.
+  rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) {
+    option (google.api.http) = {
+      get: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates"
+      additional_bindings {
+        get: "/v1beta2/{parent=projects/*/locations/*}/workflowTemplates"
+      }
+    };
+  }
+
+  // Deletes a workflow template. It does not cancel in-progress workflows.
+  rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}"
+      additional_bindings {
+        delete: "/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}"
+      }
+    };
+  }
+}
+
+// A Cloud Dataproc workflow template resource.
+message WorkflowTemplate {
+  // Required. The template id.
+  //
+  // The id must contain only letters (a-z, A-Z), numbers (0-9),
+  // underscores (_), and hyphens (-). Cannot begin or end with underscore
+  // or hyphen. Must consist of between 3 and 50 characters.
+  string id = 2;
+
+  // Output only. The "resource name" of the template, as described
+  // in https://cloud.google.com/apis/design/resource_names of the form
+  // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+  string name = 1;
+
+  // Optional. Used to perform a consistent read-modify-write.
+  //
+  // This field should be left blank for a `CreateWorkflowTemplate` request. It
+  // is required for an `UpdateWorkflowTemplate` request, and must match the
+  // current server version. A typical update template flow would fetch the
+  // current template with a `GetWorkflowTemplate` request, which will return
+  // the current template with the `version` field filled in with the
+  // current server version. The user updates other fields in the template,
+  // then returns it as part of the `UpdateWorkflowTemplate` request.
+  int32 version = 3;
+
+  // Output only. The time template was created.
+  google.protobuf.Timestamp create_time = 4;
+
+  // Output only. The time template was last updated.
+  google.protobuf.Timestamp update_time = 5;
+
+  // Optional. The labels to associate with this template. These labels
+  // will be propagated to all jobs and clusters created by the workflow
+  // instance.
+  //
+  // Label **keys** must contain 1 to 63 characters, and must conform to
+  // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+  //
+  // Label **values** may be empty, but, if present, must contain 1 to 63
+  // characters, and must conform to
+  // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+  //
+  // No more than 32 labels can be associated with a template.
+  map<string, string> labels = 6;
+
+  // Required. WorkflowTemplate scheduling information.
+  WorkflowTemplatePlacement placement = 7;
+
+  // Required. The Directed Acyclic Graph of Jobs to submit.
+  repeated OrderedJob jobs = 8;
+}
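[Editor's note — illustration, not part of the patch] A sketch of a minimal template built from this message, using the placement and job messages defined just below; the template id, labels, and jar URI are placeholders:

    WorkflowTemplate template =
        WorkflowTemplate.newBuilder()
            .setId("my-template")  // 3-50 chars; letters, digits, underscores, hyphens
            .setPlacement(
                WorkflowTemplatePlacement.newBuilder()
                    .setClusterSelector(
                        ClusterSelector.newBuilder().putClusterLabels("env", "staging")))
            .addJobs(
                OrderedJob.newBuilder()
                    .setStepId("step-1")
                    .setHadoopJob(
                        HadoopJob.newBuilder().setMainJarFileUri("gs://my-bucket/wordcount.jar")))
            .build();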
+
+// Specifies workflow execution target.
+//
+// Either `managed_cluster` or `cluster_selector` is required.
+message WorkflowTemplatePlacement {
+  // Required. Specifies where workflow executes; either on a managed
+  // cluster or an existing cluster chosen by labels.
+  oneof placement {
+    // Optional. A cluster that is managed by the workflow.
+    ManagedCluster managed_cluster = 1;
+
+    // Optional. A selector that chooses target cluster for jobs based
+    // on metadata.
+    //
+    // The selector is evaluated at the time each job is submitted.
+    ClusterSelector cluster_selector = 2;
+  }
+}
+
+// Cluster that is managed by the workflow.
+message ManagedCluster {
+  // Required. The cluster name prefix. A unique cluster name will be formed by
+  // appending a random suffix.
+  //
+  // The name must contain only lower-case letters (a-z), numbers (0-9),
+  // and hyphens (-). Must begin with a letter. Cannot begin or end with
+  // hyphen. Must consist of between 2 and 35 characters.
+  string cluster_name = 2;
+
+  // Required. The cluster configuration.
+  ClusterConfig config = 3;
+
+  // Optional. The labels to associate with this cluster.
+  //
+  // Label keys must be between 1 and 63 characters long, and must conform to
+  // the following PCRE regular expression:
+  // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+  //
+  // Label values must be between 1 and 63 characters long, and must conform to
+  // the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+  //
+  // No more than 32 labels can be associated with a given cluster.
+  map<string, string> labels = 4;
+}
+
+// A selector that chooses target cluster for jobs based on metadata.
+message ClusterSelector {
+  // Optional. The zone where workflow process executes. This parameter does not
+  // affect the selection of the cluster.
+  //
+  // If unspecified, the zone of the first cluster matching the selector
+  // is used.
+  string zone = 1;
+
+  // Required. The cluster labels. Cluster must have all labels
+  // to match.
+  map<string, string> cluster_labels = 2;
+}
+
+// A job executed by the workflow.
+message OrderedJob {
+  // Required. The step id. The id must be unique among all jobs
+  // within the template.
+  //
+  // The step id is used as prefix for job id, as job
+  // `goog-dataproc-workflow-step-id` label, and in
+  // [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other
+  // steps.
+  //
+  // The id must contain only letters (a-z, A-Z), numbers (0-9),
+  // underscores (_), and hyphens (-). Cannot begin or end with underscore
+  // or hyphen. Must consist of between 3 and 50 characters.
+  string step_id = 1;
+
+  // Required. The job definition.
+  oneof job_type {
+    // Job is a Hadoop job.
+    HadoopJob hadoop_job = 2;
+
+    // Job is a Spark job.
+    SparkJob spark_job = 3;
+
+    // Job is a Pyspark job.
+    PySparkJob pyspark_job = 4;
+
+    // Job is a Hive job.
+    HiveJob hive_job = 5;
+
+    // Job is a Pig job.
+    PigJob pig_job = 6;
+
+    // Job is a SparkSql job.
+    SparkSqlJob spark_sql_job = 7;
+  }
+
+  // Optional. The labels to associate with this job.
+  //
+  // Label keys must be between 1 and 63 characters long, and must conform to
+  // the following regular expression:
+  // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
+  //
+  // Label values must be between 1 and 63 characters long, and must conform to
+  // the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+  //
+  // No more than 32 labels can be associated with a given job.
+  map<string, string> labels = 8;
+
+  // Optional. Job scheduling configuration.
+  JobScheduling scheduling = 9;
+
+  // Optional. The optional list of prerequisite job step_ids.
+  // If not specified, the job will start at the beginning of workflow.
+  repeated string prerequisite_step_ids = 10;
+}
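[Editor's note — illustration, not part of the patch] A sketch of creating and running the template from the previous note through the generated WorkflowTemplateServiceClient. The flattened createWorkflowTemplate(parent, template) and instantiateWorkflowTemplateAsync(name) surfaces are assumed to mirror the flattened methods of the other clients in this patch; the project and region are placeholders:

    try (WorkflowTemplateServiceClient client = WorkflowTemplateServiceClient.create()) {
      String parent = "projects/my-project/regions/us-central1";
      WorkflowTemplate created = client.createWorkflowTemplate(parent, template);
      // Resolves to google.protobuf.Empty on success; progress is reported via WorkflowMetadata.
      client.instantiateWorkflowTemplateAsync(created.getName()).get();
    }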
+
+// Metadata describing a workflow created by an instantiation request.
+message WorkflowMetadata {
+  // The operation state.
+  enum State {
+    // Unused.
+    UNKNOWN = 0;
+
+    // The operation has been created.
+    PENDING = 1;
+
+    // The operation is running.
+    RUNNING = 2;
+
+    // The operation is done; either cancelled or completed.
+    DONE = 3;
+  }
+
+  // Output only. The "resource name" of the template.
+  string template = 1;
+
+  // Output only. The version of template at the time of
+  // workflow instantiation.
+  int32 version = 2;
+
+  // Output only. The create cluster operation metadata.
+  ClusterOperation create_cluster = 3;
+
+  // Output only. The workflow graph.
+  WorkflowGraph graph = 4;
+
+  // Output only. The delete cluster operation metadata.
+  ClusterOperation delete_cluster = 5;
+
+  // Output only. The workflow state.
+  State state = 6;
+
+  // Output only. The name of the managed cluster.
+  string cluster_name = 7;
+
+  // Map from parameter names to values that were used for those parameters.
+  map<string, string> parameters = 8;
+}
+
+// The cluster operation triggered by a workflow.
+message ClusterOperation {
+  // Output only. The id of the cluster operation.
+  string operation_id = 1;
+
+  // Output only. Error, if operation failed.
+  string error = 2;
+
+  // Output only. Indicates the operation is done.
+  bool done = 3;
+}
+
+// The workflow graph.
+message WorkflowGraph {
+  // Output only. The workflow nodes.
+  repeated WorkflowNode nodes = 1;
+}
+
+// The workflow node.
+message WorkflowNode {
+  // The workflow node state.
+  enum NodeState {
+    // State is unspecified.
+    NODE_STATUS_UNSPECIFIED = 0;
+
+    // The node is awaiting prerequisite node to finish.
+    BLOCKED = 1;
+
+    // The node is runnable but not running.
+    RUNNABLE = 2;
+
+    // The node is running.
+    RUNNING = 3;
+
+    // The node completed successfully.
+    COMPLETED = 4;
+
+    // The node failed. A node can be marked FAILED because
+    // its ancestor or peer failed.
+    FAILED = 5;
+  }
+
+  // Output only. The name of the node.
+  string step_id = 1;
+
+  // Output only. Node's prerequisite nodes.
+  repeated string prerequisite_step_ids = 2;
+
+  // Output only. The job id; populated after the node enters RUNNING state.
+  string job_id = 3;
+
+  // Output only. The node state.
+  NodeState state = 5;
+
+  // Output only. The error detail.
+  string error = 6;
+}
+
+// A request to create a workflow template.
+message CreateWorkflowTemplateRequest {
+  // Required. The "resource name" of the region, as described
+  // in https://cloud.google.com/apis/design/resource_names of the form
+  // `projects/{project_id}/regions/{region}`
+  string parent = 1;
+
+  // Required. The Dataproc workflow template to create.
+  WorkflowTemplate template = 2;
+}
+
+// A request to fetch a workflow template.
+message GetWorkflowTemplateRequest {
+  // Required. The "resource name" of the workflow template, as described
+  // in https://cloud.google.com/apis/design/resource_names of the form
+  // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+  string name = 1;
+
+  // Optional. The version of workflow template to retrieve. Only previously
+  // instantiated versions can be retrieved.
+  //
+  // If unspecified, retrieves the current version.
+  int32 version = 2;
+}
+
+// A request to instantiate a workflow template.
+message InstantiateWorkflowTemplateRequest {
+  // Required. The "resource name" of the workflow template, as described
+  // in https://cloud.google.com/apis/design/resource_names of the form
+  // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
+  string name = 1;
+
+  // Optional. The version of workflow template to instantiate. If specified,
+  // the workflow will be instantiated only if the current version of
+  // the workflow template has the supplied version.
+  //
+  // This option cannot be used to instantiate a previous version of
+  // workflow template.
+  int32 version = 2;
+
+  // Optional. A tag that prevents multiple concurrent workflow
+  // instances with the same tag from running. This mitigates risk of
+  // concurrent instances started due to retries.
+  //
+  // It is recommended to always set this value to a
+  // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
+  //
+  // The tag must contain only letters (a-z, A-Z), numbers (0-9),
+  // underscores (_), and hyphens (-). The maximum length is 40 characters.
+ string instance_id = 3; +} + +// A request to instantiate an inline workflow template. +message InstantiateInlineWorkflowTemplateRequest { + // Required. The "resource name" of the workflow template region, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}` + string parent = 1; + + // Required. The workflow template to instantiate. + WorkflowTemplate template = 2; + + // Optional. A tag that prevents multiple concurrent workflow + // instances with the same tag from running. This mitigates risk of + // concurrent instances started due to retries. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The tag must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string instance_id = 3; +} + +// A request to update a workflow template. +message UpdateWorkflowTemplateRequest { + // Required. The updated workflow template. + // + // The `template.version` field must match the current version. + WorkflowTemplate template = 1; +} + +// A request to list workflow templates in a project. +message ListWorkflowTemplatesRequest { + // Required. The "resource name" of the region, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}` + string parent = 1; + + // Optional. The maximum number of results to return in each response. + int32 page_size = 2; + + // Optional. The page token, returned by a previous call, to request the + // next page of results. + string page_token = 3; +} + +// A response to a request to list workflow templates in a project. +message ListWorkflowTemplatesResponse { + // Output only. WorkflowTemplates list. + repeated WorkflowTemplate templates = 1; + + // Output only. This token is included in the response if there are more results + // to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent ListWorkflowTemplatesRequest. + string next_page_token = 2; +} + +// A request to delete a workflow template. +// +// Currently started workflows will remain running. +message DeleteWorkflowTemplateRequest { + // Required. The "resource name" of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + string name = 1; + + // Optional. The version of workflow template to delete. If specified, + // will only delete the template if the current server version matches + // specified version. + int32 version = 2; +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClient.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClient.java new file mode 100644 index 000000000000..4b7542fe9f0c --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClient.java @@ -0,0 +1,935 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.stub.ClusterControllerStub; +import com.google.cloud.dataproc.v1beta2.stub.ClusterControllerStubSettings; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsClient; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND SERVICE +/** + * Service Description: The ClusterControllerService provides methods to manage clusters of Compute + * Engine instances. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * 
+ * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+ *   String projectId = "";
+ *   String region = "";
+ *   String clusterName = "";
+ *   Cluster response = clusterControllerClient.getCluster(projectId, region, clusterName);
+ * }
+ * 
+ * 
+ * + *

Note: close() needs to be called on the clusterControllerClient object to clean up resources + * such as threads. In the example above, try-with-resources is used, which automatically calls + * close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of ClusterControllerSettings to + * create(). For example: + * + *

To customize credentials: + * + *

+ * 
+ * ClusterControllerSettings clusterControllerSettings =
+ *     ClusterControllerSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * ClusterControllerClient clusterControllerClient =
+ *     ClusterControllerClient.create(clusterControllerSettings);
+ * 
+ * 
+ * + * To customize the endpoint: + * + *
+ * 
+ * ClusterControllerSettings clusterControllerSettings =
+ *     ClusterControllerSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * ClusterControllerClient clusterControllerClient =
+ *     ClusterControllerClient.create(clusterControllerSettings);
+ * 
+ * 
+ */
+@Generated("by gapic-generator")
+@BetaApi
+public class ClusterControllerClient implements BackgroundResource {
+  private final ClusterControllerSettings settings;
+  private final ClusterControllerStub stub;
+  private final OperationsClient operationsClient;
+
+  /** Constructs an instance of ClusterControllerClient with default settings. */
+  public static final ClusterControllerClient create() throws IOException {
+    return create(ClusterControllerSettings.newBuilder().build());
+  }
+
+  /**
+   * Constructs an instance of ClusterControllerClient, using the given settings. The channels are
+   * created based on the settings passed in, or defaults for any settings that are not set.
+   */
+  public static final ClusterControllerClient create(ClusterControllerSettings settings)
+      throws IOException {
+    return new ClusterControllerClient(settings);
+  }
+
+  /**
+   * Constructs an instance of ClusterControllerClient, using the given stub for making calls. This
+   * is for advanced usage - prefer to use ClusterControllerSettings.
+   */
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public static final ClusterControllerClient create(ClusterControllerStub stub) {
+    return new ClusterControllerClient(stub);
+  }
+
+  /**
+   * Constructs an instance of ClusterControllerClient, using the given settings. This is protected
+   * so that it is easy to make a subclass, but otherwise, the static factory methods should be
+   * preferred.
+   */
+  protected ClusterControllerClient(ClusterControllerSettings settings) throws IOException {
+    this.settings = settings;
+    this.stub = ((ClusterControllerStubSettings) settings.getStubSettings()).createStub();
+    this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  protected ClusterControllerClient(ClusterControllerStub stub) {
+    this.settings = null;
+    this.stub = stub;
+    this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
+  }
+
+  public final ClusterControllerSettings getSettings() {
+    return settings;
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public ClusterControllerStub getStub() {
+    return stub;
+  }
+
+  /**
+   * Returns the OperationsClient that can be used to query the status of a long-running operation
+   * returned by another API method call.
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationsClient getOperationsClient() {
+    return operationsClient;
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Creates a cluster in a project.
+   *
+   *

Sample code: + * + *


+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   Cluster cluster = Cluster.newBuilder().build();
+   *   Cluster response = clusterControllerClient.createClusterAsync(projectId, region, cluster).get();
+   * }
+   * 
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the cluster belongs
+   *     to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param cluster Required. The cluster to create.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Cluster, ClusterOperationMetadata> createClusterAsync(
+      String projectId, String region, Cluster cluster) {
+
+    CreateClusterRequest request =
+        CreateClusterRequest.newBuilder()
+            .setProjectId(projectId)
+            .setRegion(region)
+            .setCluster(cluster)
+            .build();
+    return createClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Creates a cluster in a project.
+   *
+   *

Sample code: + * + *


+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   Cluster cluster = Cluster.newBuilder().build();
+   *   CreateClusterRequest request = CreateClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setCluster(cluster)
+   *     .build();
+   *   Cluster response = clusterControllerClient.createClusterAsync(request).get();
+   * }
+   * 
+   *
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Cluster, ClusterOperationMetadata> createClusterAsync(
+      CreateClusterRequest request) {
+    return createClusterOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Creates a cluster in a project.
+   *
+   *

Sample code: + * + *


+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   Cluster cluster = Cluster.newBuilder().build();
+   *   CreateClusterRequest request = CreateClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setCluster(cluster)
+   *     .build();
+   *   OperationFuture<Operation> future = clusterControllerClient.createClusterOperationCallable().futureCall(request);
+   *   // Do something
+   *   Cluster response = future.get();
+   * }
+   * 
+   *
+   */
+  @BetaApi("The surface for use by generated code is not stable yet and may change in the future.")
+  public final OperationCallable<CreateClusterRequest, Cluster, ClusterOperationMetadata>
+      createClusterOperationCallable() {
+    return stub.createClusterOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Creates a cluster in a project.
+   *
+   *

Sample code: + * + *


+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   Cluster cluster = Cluster.newBuilder().build();
+   *   CreateClusterRequest request = CreateClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setCluster(cluster)
+   *     .build();
+   *   ApiFuture<Operation> future = clusterControllerClient.createClusterCallable().futureCall(request);
+   *   // Do something
+   *   Operation response = future.get();
+   * }
+   * 
+   *
+   */
+  public final UnaryCallable<CreateClusterRequest, Operation> createClusterCallable() {
+    return stub.createClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates a cluster in a project.
+   *
+   *

Sample code: + * + *


+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   Cluster cluster = Cluster.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   UpdateClusterRequest request = UpdateClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .setCluster(cluster)
+   *     .setUpdateMask(updateMask)
+   *     .build();
+   *   Cluster response = clusterControllerClient.updateClusterAsync(request).get();
+   * }
+   * 
+   *
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Cluster, ClusterOperationMetadata> updateClusterAsync(
+      UpdateClusterRequest request) {
+    return updateClusterOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates a cluster in a project.
+   *
+   *

Sample code: + * + *


+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   Cluster cluster = Cluster.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   UpdateClusterRequest request = UpdateClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .setCluster(cluster)
+   *     .setUpdateMask(updateMask)
+   *     .build();
+   *   OperationFuture<Operation> future = clusterControllerClient.updateClusterOperationCallable().futureCall(request);
+   *   // Do something
+   *   Cluster response = future.get();
+   * }
+   * </code></pre>
+   */
+  @BetaApi("The surface for use by generated code is not stable yet and may change in the future.")
+  public final OperationCallable<UpdateClusterRequest, Cluster, ClusterOperationMetadata>
+      updateClusterOperationCallable() {
+    return stub.updateClusterOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   Cluster cluster = Cluster.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   UpdateClusterRequest request = UpdateClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .setCluster(cluster)
+   *     .setUpdateMask(updateMask)
+   *     .build();
+   *   ApiFuture<Operation> future = clusterControllerClient.updateClusterCallable().futureCall(request);
+   *   // Do something
+   *   Operation response = future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<UpdateClusterRequest, Operation> updateClusterCallable() {
+    return stub.updateClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   Empty response = clusterControllerClient.deleteClusterAsync(projectId, region, clusterName).get();
+   * }
+   * </code></pre>
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the cluster belongs
+   *     to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param clusterName Required. The cluster name.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Empty, ClusterOperationMetadata> deleteClusterAsync(
+      String projectId, String region, String clusterName) {
+
+    DeleteClusterRequest request =
+        DeleteClusterRequest.newBuilder()
+            .setProjectId(projectId)
+            .setRegion(region)
+            .setClusterName(clusterName)
+            .build();
+    return deleteClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   DeleteClusterRequest request = DeleteClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   Empty response = clusterControllerClient.deleteClusterAsync(request).get();
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Empty, ClusterOperationMetadata> deleteClusterAsync(
+      DeleteClusterRequest request) {
+    return deleteClusterOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   DeleteClusterRequest request = DeleteClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   OperationFuture<Operation> future = clusterControllerClient.deleteClusterOperationCallable().futureCall(request);
+   *   // Do something
+   *   Empty response = future.get();
+   * }
+   * </code></pre>
+   */
+  @BetaApi("The surface for use by generated code is not stable yet and may change in the future.")
+  public final OperationCallable<DeleteClusterRequest, Empty, ClusterOperationMetadata>
+      deleteClusterOperationCallable() {
+    return stub.deleteClusterOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   DeleteClusterRequest request = DeleteClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   ApiFuture<Operation> future = clusterControllerClient.deleteClusterCallable().futureCall(request);
+   *   // Do something
+   *   future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<DeleteClusterRequest, Operation> deleteClusterCallable() {
+    return stub.deleteClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets the resource representation for a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   Cluster response = clusterControllerClient.getCluster(projectId, region, clusterName);
+   * }
+   * </code></pre>
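+   *
+   * <p>An error-handling sketch (an illustration, not generated output): a failed call surfaces
+   * as the com.google.api.gax.rpc.ApiException declared below, which carries a gRPC status code:
+   *
+   * <pre><code>
+   * try {
+   *   Cluster response = clusterControllerClient.getCluster(projectId, region, clusterName);
+   * } catch (ApiException e) {
+   *   // getStatusCode() distinguishes NOT_FOUND, PERMISSION_DENIED, etc.
+   *   System.err.println("getCluster failed: " + e.getStatusCode().getCode());
+   * }
+   * </code></pre>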
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the cluster belongs
+   *     to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param clusterName Required. The cluster name.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Cluster getCluster(String projectId, String region, String clusterName) {
+
+    GetClusterRequest request =
+        GetClusterRequest.newBuilder()
+            .setProjectId(projectId)
+            .setRegion(region)
+            .setClusterName(clusterName)
+            .build();
+    return getCluster(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets the resource representation for a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   GetClusterRequest request = GetClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   Cluster response = clusterControllerClient.getCluster(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Cluster getCluster(GetClusterRequest request) {
+    return getClusterCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets the resource representation for a cluster in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   GetClusterRequest request = GetClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   ApiFuture<Cluster> future = clusterControllerClient.getClusterCallable().futureCall(request);
+   *   // Do something
+   *   Cluster response = future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<GetClusterRequest, Cluster> getClusterCallable() {
+    return stub.getClusterCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists all regions/{region}/clusters in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   for (Cluster element : clusterControllerClient.listClusters(projectId, region).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * </code></pre>
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the cluster belongs
+   *     to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListClustersPagedResponse listClusters(String projectId, String region) {
+    ListClustersRequest request =
+        ListClustersRequest.newBuilder().setProjectId(projectId).setRegion(region).build();
+    return listClusters(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists all regions/{region}/clusters in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   ListClustersRequest request = ListClustersRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .build();
+   *   for (Cluster element : clusterControllerClient.listClusters(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListClustersPagedResponse listClusters(ListClustersRequest request) {
+    return listClustersPagedCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists all regions/{region}/clusters in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   ListClustersRequest request = ListClustersRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .build();
+   *   ApiFuture<ListClustersPagedResponse> future = clusterControllerClient.listClustersPagedCallable().futureCall(request);
+   *   // Do something
+   *   for (Cluster element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<ListClustersRequest, ListClustersPagedResponse>
+      listClustersPagedCallable() {
+    return stub.listClustersPagedCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists all regions/{region}/clusters in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   ListClustersRequest request = ListClustersRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .build();
+   *   while (true) {
+   *     ListClustersResponse response = clusterControllerClient.listClustersCallable().call(request);
+   *     for (Cluster element : response.getClustersList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<ListClustersRequest, ListClustersResponse> listClustersCallable() {
+    return stub.listClustersCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets cluster diagnostic information. After the operation completes, the Operation.response
+   * field contains `DiagnoseClusterOutputLocation`.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   Empty response = clusterControllerClient.diagnoseClusterAsync(projectId, region, clusterName).get();
+   * }
+   * </code></pre>
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the cluster belongs
+   *     to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param clusterName Required. The cluster name.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Empty, DiagnoseClusterResults> diagnoseClusterAsync(
+      String projectId, String region, String clusterName) {
+
+    DiagnoseClusterRequest request =
+        DiagnoseClusterRequest.newBuilder()
+            .setProjectId(projectId)
+            .setRegion(region)
+            .setClusterName(clusterName)
+            .build();
+    return diagnoseClusterAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets cluster diagnostic information. After the operation completes, the Operation.response
+   * field contains `DiagnoseClusterOutputLocation`.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   DiagnoseClusterRequest request = DiagnoseClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   Empty response = clusterControllerClient.diagnoseClusterAsync(request).get();
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Empty, DiagnoseClusterResults> diagnoseClusterAsync(
+      DiagnoseClusterRequest request) {
+    return diagnoseClusterOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets cluster diagnostic information. After the operation completes, the Operation.response
+   * field contains `DiagnoseClusterOutputLocation`.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   DiagnoseClusterRequest request = DiagnoseClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   OperationFuture<Operation> future = clusterControllerClient.diagnoseClusterOperationCallable().futureCall(request);
+   *   // Do something
+   *   Empty response = future.get();
+   * }
+   * </code></pre>
+   */
+  @BetaApi("The surface for use by generated code is not stable yet and may change in the future.")
+  public final OperationCallable<DiagnoseClusterRequest, Empty, DiagnoseClusterResults>
+      diagnoseClusterOperationCallable() {
+    return stub.diagnoseClusterOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets cluster diagnostic information. After the operation completes, the Operation.response
+   * field contains `DiagnoseClusterOutputLocation`.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String clusterName = "";
+   *   DiagnoseClusterRequest request = DiagnoseClusterRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setClusterName(clusterName)
+   *     .build();
+   *   ApiFuture<Operation> future = clusterControllerClient.diagnoseClusterCallable().futureCall(request);
+   *   // Do something
+   *   future.get();
+   * }
+   * </code></pre>
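+   *
+   * <p>A sketch (an illustration, not generated output) of reading the diagnose output location;
+   * it assumes the results are surfaced through the operation metadata as DiagnoseClusterResults,
+   * whose output_uri field is defined in clusters.proto:
+   *
+   * <pre><code>
+   * OperationFuture<Empty, DiagnoseClusterResults> future =
+   *     clusterControllerClient.diagnoseClusterAsync(request);
+   * future.get(); // wait for the diagnose operation to complete
+   * String outputUri = future.getMetadata().get().getOutputUri();
+   * </code></pre>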
+   */
+  public final UnaryCallable<DiagnoseClusterRequest, Operation> diagnoseClusterCallable() {
+    return stub.diagnoseClusterCallable();
+  }
+
+  @Override
+  public final void close() {
+    stub.close();
+  }
+
+  @Override
+  public void shutdown() {
+    stub.shutdown();
+  }
+
+  @Override
+  public boolean isShutdown() {
+    return stub.isShutdown();
+  }
+
+  @Override
+  public boolean isTerminated() {
+    return stub.isTerminated();
+  }
+
+  @Override
+  public void shutdownNow() {
+    stub.shutdownNow();
+  }
+
+  @Override
+  public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
+    return stub.awaitTermination(duration, unit);
+  }
+
+  public static class ListClustersPagedResponse
+      extends AbstractPagedListResponse<
+          ListClustersRequest, ListClustersResponse, Cluster, ListClustersPage,
+          ListClustersFixedSizeCollection> {
+
+    public static ApiFuture<ListClustersPagedResponse> createAsync(
+        PageContext<ListClustersRequest, ListClustersResponse, Cluster> context,
+        ApiFuture<ListClustersResponse> futureResponse) {
+      ApiFuture<ListClustersPage> futurePage =
+          ListClustersPage.createEmptyPage().createPageAsync(context, futureResponse);
+      return ApiFutures.transform(
+          futurePage,
+          new ApiFunction<ListClustersPage, ListClustersPagedResponse>() {
+            @Override
+            public ListClustersPagedResponse apply(ListClustersPage input) {
+              return new ListClustersPagedResponse(input);
+            }
+          });
+    }
+
+    private ListClustersPagedResponse(ListClustersPage page) {
+      super(page, ListClustersFixedSizeCollection.createEmptyCollection());
+    }
+  }
+
+  public static class ListClustersPage
+      extends AbstractPage<ListClustersRequest, ListClustersResponse, Cluster, ListClustersPage> {
+
+    private ListClustersPage(
+        PageContext<ListClustersRequest, ListClustersResponse, Cluster> context,
+        ListClustersResponse response) {
+      super(context, response);
+    }
+
+    private static ListClustersPage createEmptyPage() {
+      return new ListClustersPage(null, null);
+    }
+
+    @Override
+    protected ListClustersPage createPage(
+        PageContext<ListClustersRequest, ListClustersResponse, Cluster> context,
+        ListClustersResponse response) {
+      return new ListClustersPage(context, response);
+    }
+
+    @Override
+    public ApiFuture<ListClustersPage> createPageAsync(
+        PageContext<ListClustersRequest, ListClustersResponse, Cluster> context,
+        ApiFuture<ListClustersResponse> futureResponse) {
+      return super.createPageAsync(context, futureResponse);
+    }
+  }
+
+  public static class ListClustersFixedSizeCollection
+      extends AbstractFixedSizeCollection<
+          ListClustersRequest, ListClustersResponse, Cluster, ListClustersPage,
+          ListClustersFixedSizeCollection> {
+
+    private ListClustersFixedSizeCollection(List<ListClustersPage> pages, int collectionSize) {
+      super(pages, collectionSize);
+    }
+
+    private static ListClustersFixedSizeCollection createEmptyCollection() {
+      return new ListClustersFixedSizeCollection(null, 0);
+    }
+
+    @Override
+    protected ListClustersFixedSizeCollection createCollection(
+        List<ListClustersPage> pages, int collectionSize) {
+      return new ListClustersFixedSizeCollection(pages, collectionSize);
+    }
+  }
+}
diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSettings.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSettings.java
new file mode 100644
index 000000000000..0fe0068986f8
--- /dev/null
+++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSettings.java
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.dataproc.v1beta2;
+
+import static com.google.cloud.dataproc.v1beta2.ClusterControllerClient.ListClustersPagedResponse;
+
+import com.google.api.core.ApiFunction;
+import com.google.api.core.BetaApi;
+import com.google.api.gax.core.GoogleCredentialsProvider;
+import com.google.api.gax.core.InstantiatingExecutorProvider;
+import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
+import com.google.api.gax.rpc.ApiClientHeaderProvider;
+import com.google.api.gax.rpc.ClientContext;
+import com.google.api.gax.rpc.ClientSettings;
+import com.google.api.gax.rpc.OperationCallSettings;
+import com.google.api.gax.rpc.PagedCallSettings;
+import com.google.api.gax.rpc.TransportChannelProvider;
+import com.google.api.gax.rpc.UnaryCallSettings;
+import com.google.cloud.dataproc.v1beta2.stub.ClusterControllerStubSettings;
+import com.google.longrunning.Operation;
+import com.google.protobuf.Empty;
+import java.io.IOException;
+import java.util.List;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS
+/**
+ * Settings class to configure an instance of {@link ClusterControllerClient}.
+ *
+ * <p>The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (dataproc.googleapis.com) and default port (443) are used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of getCluster to 30 seconds:
+ *
+ * <pre>
+ * <code>
+ * ClusterControllerSettings.Builder clusterControllerSettingsBuilder =
+ *     ClusterControllerSettings.newBuilder();
+ * clusterControllerSettingsBuilder
+ *     .getClusterSettings()
+ *     .setRetrySettings(
+ *         clusterControllerSettingsBuilder
+ *             .getClusterSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * ClusterControllerSettings clusterControllerSettings = clusterControllerSettingsBuilder.build();
+ * </code>
+ * </pre>
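+ *
+ * The customized settings can then be passed to ClusterControllerClient.create() (a usage sketch
+ * mirroring the create() pattern documented on the client class):
+ *
+ * <pre>
+ * <code>
+ * ClusterControllerClient clusterControllerClient =
+ *     ClusterControllerClient.create(clusterControllerSettings);
+ * </code>
+ * </pre>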
+ */
+@Generated("by gapic-generator")
+@BetaApi
+public class ClusterControllerSettings extends ClientSettings<ClusterControllerSettings> {
+  /** Returns the object with the settings used for calls to createCluster. */
+  public UnaryCallSettings<CreateClusterRequest, Operation> createClusterSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).createClusterSettings();
+  }
+
+  /** Returns the object with the settings used for calls to createCluster. */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public OperationCallSettings<CreateClusterRequest, Cluster, ClusterOperationMetadata>
+      createClusterOperationSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).createClusterOperationSettings();
+  }
+
+  /** Returns the object with the settings used for calls to updateCluster. */
+  public UnaryCallSettings<UpdateClusterRequest, Operation> updateClusterSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).updateClusterSettings();
+  }
+
+  /** Returns the object with the settings used for calls to updateCluster. */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public OperationCallSettings<UpdateClusterRequest, Cluster, ClusterOperationMetadata>
+      updateClusterOperationSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).updateClusterOperationSettings();
+  }
+
+  /** Returns the object with the settings used for calls to deleteCluster. */
+  public UnaryCallSettings<DeleteClusterRequest, Operation> deleteClusterSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).deleteClusterSettings();
+  }
+
+  /** Returns the object with the settings used for calls to deleteCluster. */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public OperationCallSettings<DeleteClusterRequest, Empty, ClusterOperationMetadata>
+      deleteClusterOperationSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).deleteClusterOperationSettings();
+  }
+
+  /** Returns the object with the settings used for calls to getCluster. */
+  public UnaryCallSettings<GetClusterRequest, Cluster> getClusterSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).getClusterSettings();
+  }
+
+  /** Returns the object with the settings used for calls to listClusters. */
+  public PagedCallSettings<ListClustersRequest, ListClustersResponse, ListClustersPagedResponse>
+      listClustersSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).listClustersSettings();
+  }
+
+  /** Returns the object with the settings used for calls to diagnoseCluster. */
+  public UnaryCallSettings<DiagnoseClusterRequest, Operation> diagnoseClusterSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).diagnoseClusterSettings();
+  }
+
+  /** Returns the object with the settings used for calls to diagnoseCluster. */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public OperationCallSettings<DiagnoseClusterRequest, Empty, DiagnoseClusterResults>
+      diagnoseClusterOperationSettings() {
+    return ((ClusterControllerStubSettings) getStubSettings()).diagnoseClusterOperationSettings();
+  }
+
+  public static final ClusterControllerSettings create(ClusterControllerStubSettings stub)
+      throws IOException {
+    return new ClusterControllerSettings.Builder(stub.toBuilder()).build();
+  }
+
+  /** Returns a builder for the default ExecutorProvider for this service. */
+  public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() {
+    return ClusterControllerStubSettings.defaultExecutorProviderBuilder();
+  }
+
+  /** Returns the default service endpoint. */
+  public static String getDefaultEndpoint() {
+    return ClusterControllerStubSettings.getDefaultEndpoint();
+  }
+
+  /** Returns the default service scopes. */
+  public static List<String> getDefaultServiceScopes() {
+    return ClusterControllerStubSettings.getDefaultServiceScopes();
+  }
+
+  /** Returns a builder for the default credentials for this service. */
+  public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() {
+    return ClusterControllerStubSettings.defaultCredentialsProviderBuilder();
+  }
+
+  /** Returns a builder for the default ChannelProvider for this service. */
+  public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() {
+    return ClusterControllerStubSettings.defaultGrpcTransportProviderBuilder();
+  }
+
+  public static TransportChannelProvider defaultTransportChannelProvider() {
+    return ClusterControllerStubSettings.defaultTransportChannelProvider();
+  }
+
+  @BetaApi("The surface for customizing headers is not stable yet and may change in the future.")
+  public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() {
+    return ClusterControllerStubSettings.defaultApiClientHeaderProviderBuilder();
+  }
+
+  /** Returns a new builder for this class. */
+  public static Builder newBuilder() {
+    return Builder.createDefault();
+  }
+
+  /** Returns a new builder for this class. */
+  public static Builder newBuilder(ClientContext clientContext) {
+    return new Builder(clientContext);
+  }
+
+  /** Returns a builder containing all the values of this settings class. */
+  public Builder toBuilder() {
+    return new Builder(this);
+  }
+
+  protected ClusterControllerSettings(Builder settingsBuilder) throws IOException {
+    super(settingsBuilder);
+  }
+
+  /** Builder for ClusterControllerSettings. */
+  public static class Builder extends ClientSettings.Builder<ClusterControllerSettings, Builder> {
+    protected Builder() throws IOException {
+      this((ClientContext) null);
+    }
+
+    protected Builder(ClientContext clientContext) {
+      super(ClusterControllerStubSettings.newBuilder(clientContext));
+    }
+
+    private static Builder createDefault() {
+      return new Builder(ClusterControllerStubSettings.newBuilder());
+    }
+
+    protected Builder(ClusterControllerSettings settings) {
+      super(settings.getStubSettings().toBuilder());
+    }
+
+    protected Builder(ClusterControllerStubSettings.Builder stubSettings) {
+      super(stubSettings);
+    }
+
+    public ClusterControllerStubSettings.Builder getStubSettingsBuilder() {
+      return ((ClusterControllerStubSettings.Builder) getStubSettings());
+    }
+
+    // NEXT_MAJOR_VER: remove 'throws Exception'
+    /**
+     * Applies the given settings updater function to all of the unary API methods in this service.
+     *
+     * <p>Note: This method does not support applying settings to streaming methods.
+     */
+    public Builder applyToAllUnaryMethods(
+        ApiFunction<UnaryCallSettings.Builder<?, ?>, Void> settingsUpdater) throws Exception {
+      super.applyToAllUnaryMethods(
+          getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater);
+      return this;
+    }
+
+    /** Returns the builder for the settings used for calls to createCluster. */
+    public UnaryCallSettings.Builder<CreateClusterRequest, Operation> createClusterSettings() {
+      return getStubSettingsBuilder().createClusterSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to createCluster. */
+    @BetaApi(
+        "The surface for long-running operations is not stable yet and may change in the future.")
+    public OperationCallSettings.Builder<CreateClusterRequest, Cluster, ClusterOperationMetadata>
+        createClusterOperationSettings() {
+      return getStubSettingsBuilder().createClusterOperationSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to updateCluster. */
+    public UnaryCallSettings.Builder<UpdateClusterRequest, Operation> updateClusterSettings() {
+      return getStubSettingsBuilder().updateClusterSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to updateCluster. */
+    @BetaApi(
+        "The surface for long-running operations is not stable yet and may change in the future.")
+    public OperationCallSettings.Builder<UpdateClusterRequest, Cluster, ClusterOperationMetadata>
+        updateClusterOperationSettings() {
+      return getStubSettingsBuilder().updateClusterOperationSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to deleteCluster. */
+    public UnaryCallSettings.Builder<DeleteClusterRequest, Operation> deleteClusterSettings() {
+      return getStubSettingsBuilder().deleteClusterSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to deleteCluster. */
+    @BetaApi(
+        "The surface for long-running operations is not stable yet and may change in the future.")
+    public OperationCallSettings.Builder<DeleteClusterRequest, Empty, ClusterOperationMetadata>
+        deleteClusterOperationSettings() {
+      return getStubSettingsBuilder().deleteClusterOperationSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to getCluster. */
+    public UnaryCallSettings.Builder<GetClusterRequest, Cluster> getClusterSettings() {
+      return getStubSettingsBuilder().getClusterSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to listClusters. */
+    public PagedCallSettings.Builder<
+            ListClustersRequest, ListClustersResponse, ListClustersPagedResponse>
+        listClustersSettings() {
+      return getStubSettingsBuilder().listClustersSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to diagnoseCluster. */
+    public UnaryCallSettings.Builder<DiagnoseClusterRequest, Operation> diagnoseClusterSettings() {
+      return getStubSettingsBuilder().diagnoseClusterSettings();
+    }
+
+    /** Returns the builder for the settings used for calls to diagnoseCluster. */
+    @BetaApi(
+        "The surface for long-running operations is not stable yet and may change in the future.")
+    public OperationCallSettings.Builder<DiagnoseClusterRequest, Empty, DiagnoseClusterResults>
+        diagnoseClusterOperationSettings() {
+      return getStubSettingsBuilder().diagnoseClusterOperationSettings();
+    }
+
+    @Override
+    public ClusterControllerSettings build() throws IOException {
+      return new ClusterControllerSettings(this);
+    }
+  }
+}
diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerClient.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerClient.java
new file mode 100644
index 000000000000..8729b99b82ed
--- /dev/null
+++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerClient.java
@@ -0,0 +1,773 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.dataproc.v1beta2;
+
+import com.google.api.core.ApiFunction;
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutures;
+import com.google.api.core.BetaApi;
+import com.google.api.gax.core.BackgroundResource;
+import com.google.api.gax.paging.AbstractFixedSizeCollection;
+import com.google.api.gax.paging.AbstractPage;
+import com.google.api.gax.paging.AbstractPagedListResponse;
+import com.google.api.gax.rpc.PageContext;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.dataproc.v1beta2.stub.JobControllerStub;
+import com.google.cloud.dataproc.v1beta2.stub.JobControllerStubSettings;
+import com.google.protobuf.Empty;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND SERVICE
+/**
+ * Service Description: The JobController provides methods to manage jobs.
+ *
+ * <p>This class provides the ability to make remote calls to the backing service through method
+ * calls that map to API methods. Sample code to get started:
+ *
+ * <pre>
+ * <code>
+ * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+ *   String projectId = "";
+ *   String region = "";
+ *   Job job = Job.newBuilder().build();
+ *   Job response = jobControllerClient.submitJob(projectId, region, job);
+ * }
+ * 
+ * </code>
+ * </pre>
+ *
+ * <p>Note: close() needs to be called on the jobControllerClient object to clean up resources such
+ * as threads. In the example above, try-with-resources is used, which automatically calls close().
+ *
+ * <p>The surface of this class includes several types of Java methods for each of the API's
+ * methods:
+ *
+ * <ol>
+ *   <li>A "flattened" method. With this type of method, the fields of the request type have been
+ *       converted into function parameters. It may be the case that not all fields are available as
+ *       parameters, and not every API method will have a flattened method entry point.
+ *   <li>A "request object" method. This type of method only takes one parameter, a request object,
+ *       which must be constructed before the call. Not every API method will have a request object
+ *       method.
+ *   <li>A "callable" method. This type of method takes no parameters and returns an immutable API
+ *       callable object, which can be used to initiate calls to the service.
+ * </ol>
+ *
+ * <p>See the individual methods for example code.
+ *
+ * <p>Many parameters require resource names to be formatted in a particular way. To assist with
+ * these names, this class includes a format method for each type of name, and additionally a parse
+ * method to extract the individual identifiers contained within names that are returned.
+ *
+ * <p>This class can be customized by passing in a custom instance of JobControllerSettings to
+ * create(). For example:
+ *
+ * <p>To customize credentials:
+ *
+ * <pre>
+ * <code>
+ * JobControllerSettings jobControllerSettings =
+ *     JobControllerSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * JobControllerClient jobControllerClient =
+ *     JobControllerClient.create(jobControllerSettings);
+ * </code>
+ * </pre>
+ *
+ * To customize the endpoint:
+ *
+ * <pre>
+ * <code>
+ * JobControllerSettings jobControllerSettings =
+ *     JobControllerSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * JobControllerClient jobControllerClient =
+ *     JobControllerClient.create(jobControllerSettings);
+ * </code>
+ * </pre>
+ */
+@Generated("by gapic-generator")
+@BetaApi
+public class JobControllerClient implements BackgroundResource {
+  private final JobControllerSettings settings;
+  private final JobControllerStub stub;
+
+  /** Constructs an instance of JobControllerClient with default settings. */
+  public static final JobControllerClient create() throws IOException {
+    return create(JobControllerSettings.newBuilder().build());
+  }
+
+  /**
+   * Constructs an instance of JobControllerClient, using the given settings. The channels are
+   * created based on the settings passed in, or defaults for any settings that are not set.
+   */
+  public static final JobControllerClient create(JobControllerSettings settings)
+      throws IOException {
+    return new JobControllerClient(settings);
+  }
+
+  /**
+   * Constructs an instance of JobControllerClient, using the given stub for making calls. This is
+   * for advanced usage - prefer to use {@link JobControllerSettings}.
+   */
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public static final JobControllerClient create(JobControllerStub stub) {
+    return new JobControllerClient(stub);
+  }
+
+  /**
+   * Constructs an instance of JobControllerClient, using the given settings. This is protected so
+   * that it is easy to make a subclass, but otherwise, the static factory methods should be
+   * preferred.
+   */
+  protected JobControllerClient(JobControllerSettings settings) throws IOException {
+    this.settings = settings;
+    this.stub = ((JobControllerStubSettings) settings.getStubSettings()).createStub();
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  protected JobControllerClient(JobControllerStub stub) {
+    this.settings = null;
+    this.stub = stub;
+  }
+
+  public final JobControllerSettings getSettings() {
+    return settings;
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public JobControllerStub getStub() {
+    return stub;
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Submits a job to a cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   Job job = Job.newBuilder().build();
+   *   Job response = jobControllerClient.submitJob(projectId, region, job);
+   * }
+   * </code></pre>
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the job belongs to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param job Required. The job resource.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Job submitJob(String projectId, String region, Job job) {
+
+    SubmitJobRequest request =
+        SubmitJobRequest.newBuilder().setProjectId(projectId).setRegion(region).setJob(job).build();
+    return submitJob(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Submits a job to a cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   Job job = Job.newBuilder().build();
+   *   SubmitJobRequest request = SubmitJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJob(job)
+   *     .build();
+   *   Job response = jobControllerClient.submitJob(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Job submitJob(SubmitJobRequest request) {
+    return submitJobCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Submits a job to a cluster.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   Job job = Job.newBuilder().build();
+   *   SubmitJobRequest request = SubmitJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJob(job)
+   *     .build();
+   *   ApiFuture<Job> future = jobControllerClient.submitJobCallable().futureCall(request);
+   *   // Do something
+   *   Job response = future.get();
+   * }
+   * </code></pre>
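+   *
+   * <p>A minimal sketch (an illustration, not generated output) of attaching a callback instead
+   * of blocking on the future; it assumes api-common's ApiFutures.addCallback overload that takes
+   * an executor, and Guava's MoreExecutors:
+   *
+   * <pre><code>
+   * ApiFuture<Job> future = jobControllerClient.submitJobCallable().futureCall(request);
+   * ApiFutures.addCallback(future, new ApiFutureCallback<Job>() {
+   *   public void onSuccess(Job job) {
+   *     // called once the job has been submitted
+   *   }
+   *   public void onFailure(Throwable t) {
+   *     // called if the submission fails
+   *   }
+   * }, MoreExecutors.directExecutor());
+   * </code></pre>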
+   */
+  public final UnaryCallable<SubmitJobRequest, Job> submitJobCallable() {
+    return stub.submitJobCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets the resource representation for a job in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   Job response = jobControllerClient.getJob(projectId, region, jobId);
+   * }
+   * </code></pre>
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the job belongs to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param jobId Required. The job ID.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Job getJob(String projectId, String region, String jobId) {
+
+    GetJobRequest request =
+        GetJobRequest.newBuilder()
+            .setProjectId(projectId)
+            .setRegion(region)
+            .setJobId(jobId)
+            .build();
+    return getJob(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets the resource representation for a job in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   GetJobRequest request = GetJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .build();
+   *   Job response = jobControllerClient.getJob(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Job getJob(GetJobRequest request) {
+    return getJobCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Gets the resource representation for a job in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   GetJobRequest request = GetJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .build();
+   *   ApiFuture<Job> future = jobControllerClient.getJobCallable().futureCall(request);
+   *   // Do something
+   *   Job response = future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<GetJobRequest, Job> getJobCallable() {
+    return stub.getJobCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists regions/{region}/jobs in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   for (Job element : jobControllerClient.listJobs(projectId, region).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * </code></pre>
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the job belongs to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListJobsPagedResponse listJobs(String projectId, String region) {
+    ListJobsRequest request =
+        ListJobsRequest.newBuilder().setProjectId(projectId).setRegion(region).build();
+    return listJobs(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists regions/{region}/jobs in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   ListJobsRequest request = ListJobsRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .build();
+   *   for (Job element : jobControllerClient.listJobs(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final ListJobsPagedResponse listJobs(ListJobsRequest request) {
+    return listJobsPagedCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists regions/{region}/jobs in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   ListJobsRequest request = ListJobsRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .build();
+   *   ApiFuture<ListJobsPagedResponse> future = jobControllerClient.listJobsPagedCallable().futureCall(request);
+   *   // Do something
+   *   for (Job element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<ListJobsRequest, ListJobsPagedResponse> listJobsPagedCallable() {
+    return stub.listJobsPagedCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists regions/{region}/jobs in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   ListJobsRequest request = ListJobsRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .build();
+   *   while (true) {
+   *     ListJobsResponse response = jobControllerClient.listJobsCallable().call(request);
+   *     for (Job element : response.getJobsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<ListJobsRequest, ListJobsResponse> listJobsCallable() {
+    return stub.listJobsCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates a job in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   Job job = Job.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   UpdateJobRequest request = UpdateJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .setJob(job)
+   *     .setUpdateMask(updateMask)
+   *     .build();
+   *   Job response = jobControllerClient.updateJob(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Job updateJob(UpdateJobRequest request) {
+    return updateJobCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates a job in a project.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   Job job = Job.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   UpdateJobRequest request = UpdateJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .setJob(job)
+   *     .setUpdateMask(updateMask)
+   *     .build();
+   *   ApiFuture<Job> future = jobControllerClient.updateJobCallable().futureCall(request);
+   *   // Do something
+   *   Job response = future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<UpdateJobRequest, Job> updateJobCallable() {
+    return stub.updateJobCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Starts a job cancellation request. To access the job resource after cancellation, call
+   * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+   * or
+   * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   Job response = jobControllerClient.cancelJob(projectId, region, jobId);
+   * }
+   * </code></pre>
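+   *
+   * <p>Because this call only starts cancellation, a caller might poll until the job reaches a
+   * terminal state (a sketch, not generated output; state names follow the JobStatus.State enum
+   * in the jobs proto):
+   *
+   * <pre><code>
+   * Job job = jobControllerClient.cancelJob(projectId, region, jobId);
+   * while (job.getStatus().getState() != JobStatus.State.CANCELLED
+   *     && job.getStatus().getState() != JobStatus.State.DONE
+   *     && job.getStatus().getState() != JobStatus.State.ERROR) {
+   *   Thread.sleep(1000L);
+   *   job = jobControllerClient.getJob(projectId, region, jobId);
+   * }
+   * </code></pre>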
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the job belongs to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param jobId Required. The job ID.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Job cancelJob(String projectId, String region, String jobId) {
+
+    CancelJobRequest request =
+        CancelJobRequest.newBuilder()
+            .setProjectId(projectId)
+            .setRegion(region)
+            .setJobId(jobId)
+            .build();
+    return cancelJob(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Starts a job cancellation request. To access the job resource after cancellation, call
+   * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+   * or
+   * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   CancelJobRequest request = CancelJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .build();
+   *   Job response = jobControllerClient.cancelJob(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final Job cancelJob(CancelJobRequest request) {
+    return cancelJobCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Starts a job cancellation request. To access the job resource after cancellation, call
+   * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+   * or
+   * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   CancelJobRequest request = CancelJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .build();
+   *   ApiFuture<Job> future = jobControllerClient.cancelJobCallable().futureCall(request);
+   *   // Do something
+   *   Job response = future.get();
+   * }
+   * </code></pre>
+   */
+  public final UnaryCallable<CancelJobRequest, Job> cancelJobCallable() {
+    return stub.cancelJobCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes the job from the project. If the job is active, the delete fails, and the response
+   * returns `FAILED_PRECONDITION`.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   jobControllerClient.deleteJob(projectId, region, jobId);
+   * }
+   * </code></pre>
+   *
+   * @param projectId Required. The ID of the Google Cloud Platform project that the job belongs to.
+   * @param region Required. The Cloud Dataproc region in which to handle the request.
+   * @param jobId Required. The job ID.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final void deleteJob(String projectId, String region, String jobId) {
+
+    DeleteJobRequest request =
+        DeleteJobRequest.newBuilder()
+            .setProjectId(projectId)
+            .setRegion(region)
+            .setJobId(jobId)
+            .build();
+    deleteJob(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes the job from the project. If the job is active, the delete fails, and the response
+   * returns `FAILED_PRECONDITION`.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   DeleteJobRequest request = DeleteJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .build();
+   *   jobControllerClient.deleteJob(request);
+   * }
+   * </code></pre>
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final void deleteJob(DeleteJobRequest request) {
+    deleteJobCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes the job from the project. If the job is active, the delete fails, and the response
+   * returns `FAILED_PRECONDITION`.
+   *
+   * <p>Sample code:
+   *
+   * <pre><code>
+   * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+   *   String projectId = "";
+   *   String region = "";
+   *   String jobId = "";
+   *   DeleteJobRequest request = DeleteJobRequest.newBuilder()
+   *     .setProjectId(projectId)
+   *     .setRegion(region)
+   *     .setJobId(jobId)
+   *     .build();
+   *   ApiFuture<Void> future = jobControllerClient.deleteJobCallable().futureCall(request);
+   *   // Do something
+   *   future.get();
+   * }
+   * </code></pre>
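+   *
+   * <p>A graceful-shutdown sketch (an illustration, not generated output), using the
+   * BackgroundResource methods this client implements below:
+   *
+   * <pre><code>
+   * jobControllerClient.shutdown();
+   * if (!jobControllerClient.awaitTermination(30, TimeUnit.SECONDS)) {
+   *   jobControllerClient.shutdownNow();
+   * }
+   * </code></pre>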
+   */
+  public final UnaryCallable<DeleteJobRequest, Empty> deleteJobCallable() {
+    return stub.deleteJobCallable();
+  }
+
+  @Override
+  public final void close() {
+    stub.close();
+  }
+
+  @Override
+  public void shutdown() {
+    stub.shutdown();
+  }
+
+  @Override
+  public boolean isShutdown() {
+    return stub.isShutdown();
+  }
+
+  @Override
+  public boolean isTerminated() {
+    return stub.isTerminated();
+  }
+
+  @Override
+  public void shutdownNow() {
+    stub.shutdownNow();
+  }
+
+  @Override
+  public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
+    return stub.awaitTermination(duration, unit);
+  }
+
+  public static class ListJobsPagedResponse
+      extends AbstractPagedListResponse<
+          ListJobsRequest, ListJobsResponse, Job, ListJobsPage, ListJobsFixedSizeCollection> {
+
+    public static ApiFuture<ListJobsPagedResponse> createAsync(
+        PageContext<ListJobsRequest, ListJobsResponse, Job> context,
+        ApiFuture<ListJobsResponse> futureResponse) {
+      ApiFuture<ListJobsPage> futurePage =
+          ListJobsPage.createEmptyPage().createPageAsync(context, futureResponse);
+      return ApiFutures.transform(
+          futurePage,
+          new ApiFunction<ListJobsPage, ListJobsPagedResponse>() {
+            @Override
+            public ListJobsPagedResponse apply(ListJobsPage input) {
+              return new ListJobsPagedResponse(input);
+            }
+          });
+    }
+
+    private ListJobsPagedResponse(ListJobsPage page) {
+      super(page, ListJobsFixedSizeCollection.createEmptyCollection());
+    }
+  }
+
+  public static class ListJobsPage
+      extends AbstractPage<ListJobsRequest, ListJobsResponse, Job, ListJobsPage> {
+
+    private ListJobsPage(
+        PageContext<ListJobsRequest, ListJobsResponse, Job> context, ListJobsResponse response) {
+      super(context, response);
+    }
+
+    private static ListJobsPage createEmptyPage() {
+      return new ListJobsPage(null, null);
+    }
+
+    @Override
+    protected ListJobsPage createPage(
+        PageContext<ListJobsRequest, ListJobsResponse, Job> context, ListJobsResponse response) {
+      return new ListJobsPage(context, response);
+    }
+
+    @Override
+    public ApiFuture<ListJobsPage> createPageAsync(
+        PageContext<ListJobsRequest, ListJobsResponse, Job> context,
+        ApiFuture<ListJobsResponse> futureResponse) {
+      return super.createPageAsync(context, futureResponse);
+    }
+  }
+
+  public static class ListJobsFixedSizeCollection
+      extends AbstractFixedSizeCollection<
+          ListJobsRequest, ListJobsResponse, Job, ListJobsPage, ListJobsFixedSizeCollection> {
+
+    private ListJobsFixedSizeCollection(List<ListJobsPage> pages, int collectionSize) {
+      super(pages, collectionSize);
+    }
+
+    private static ListJobsFixedSizeCollection createEmptyCollection() {
+      return new ListJobsFixedSizeCollection(null, 0);
+    }
+
+    @Override
+    protected ListJobsFixedSizeCollection createCollection(
+        List<ListJobsPage> pages, int collectionSize) {
+      return new ListJobsFixedSizeCollection(pages, collectionSize);
+    }
+  }
+}
diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerSettings.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerSettings.java
new file mode 100644
index 000000000000..00264006fe17
--- /dev/null
+++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerSettings.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.google.cloud.dataproc.v1beta2; + +import static com.google.cloud.dataproc.v1beta2.JobControllerClient.ListJobsPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.dataproc.v1beta2.stub.JobControllerStubSettings; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link JobControllerClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (dataproc.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. For + * example, to set the total timeout of submitJob to 30 seconds: + * + *

+ * 
+ * JobControllerSettings.Builder jobControllerSettingsBuilder =
+ *     JobControllerSettings.newBuilder();
+ * jobControllerSettingsBuilder.submitJobSettings().setRetrySettings(
+ *     jobControllerSettingsBuilder.submitJobSettings().getRetrySettings().toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * JobControllerSettings jobControllerSettings = jobControllerSettingsBuilder.build();
+ * 
+ * 
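+ *
+ * A sketch of the same idea applied service-wide (assuming the builder above): the
+ * applyToAllUnaryMethods helper defined on this class's Builder applies one settings change to
+ * every unary method at once, instead of repeating it per method:
+ *
+ * jobControllerSettingsBuilder.applyToAllUnaryMethods(
+ *     new ApiFunction<UnaryCallSettings.Builder<?, ?>, Void>() {
+ *       public Void apply(UnaryCallSettings.Builder<?, ?> builder) {
+ *         // RetrySettings is immutable, so rebuild it and set it back on each method's builder.
+ *         builder.setRetrySettings(
+ *             builder.getRetrySettings().toBuilder()
+ *                 .setTotalTimeout(Duration.ofSeconds(30))
+ *                 .build());
+ *         return null;
+ *       }
+ *     });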
+ */ +@Generated("by gapic-generator") +@BetaApi +public class JobControllerSettings extends ClientSettings { + /** Returns the object with the settings used for calls to submitJob. */ + public UnaryCallSettings submitJobSettings() { + return ((JobControllerStubSettings) getStubSettings()).submitJobSettings(); + } + + /** Returns the object with the settings used for calls to getJob. */ + public UnaryCallSettings getJobSettings() { + return ((JobControllerStubSettings) getStubSettings()).getJobSettings(); + } + + /** Returns the object with the settings used for calls to listJobs. */ + public PagedCallSettings + listJobsSettings() { + return ((JobControllerStubSettings) getStubSettings()).listJobsSettings(); + } + + /** Returns the object with the settings used for calls to updateJob. */ + public UnaryCallSettings updateJobSettings() { + return ((JobControllerStubSettings) getStubSettings()).updateJobSettings(); + } + + /** Returns the object with the settings used for calls to cancelJob. */ + public UnaryCallSettings cancelJobSettings() { + return ((JobControllerStubSettings) getStubSettings()).cancelJobSettings(); + } + + /** Returns the object with the settings used for calls to deleteJob. */ + public UnaryCallSettings deleteJobSettings() { + return ((JobControllerStubSettings) getStubSettings()).deleteJobSettings(); + } + + public static final JobControllerSettings create(JobControllerStubSettings stub) + throws IOException { + return new JobControllerSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return JobControllerStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return JobControllerStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return JobControllerStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return JobControllerStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return JobControllerStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return JobControllerStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return JobControllerStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected JobControllerSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for JobControllerSettings. */ + public static class Builder extends ClientSettings.Builder { + protected Builder() throws IOException { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(JobControllerStubSettings.newBuilder(clientContext)); + } + + private static Builder createDefault() { + return new Builder(JobControllerStubSettings.newBuilder()); + } + + protected Builder(JobControllerSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(JobControllerStubSettings.Builder stubSettings) { + super(stubSettings); + } + + public JobControllerStubSettings.Builder getStubSettingsBuilder() { + return ((JobControllerStubSettings.Builder) getStubSettings()); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to submitJob. */ + public UnaryCallSettings.Builder submitJobSettings() { + return getStubSettingsBuilder().submitJobSettings(); + } + + /** Returns the builder for the settings used for calls to getJob. */ + public UnaryCallSettings.Builder getJobSettings() { + return getStubSettingsBuilder().getJobSettings(); + } + + /** Returns the builder for the settings used for calls to listJobs. */ + public PagedCallSettings.Builder + listJobsSettings() { + return getStubSettingsBuilder().listJobsSettings(); + } + + /** Returns the builder for the settings used for calls to updateJob. */ + public UnaryCallSettings.Builder updateJobSettings() { + return getStubSettingsBuilder().updateJobSettings(); + } + + /** Returns the builder for the settings used for calls to cancelJob. */ + public UnaryCallSettings.Builder cancelJobSettings() { + return getStubSettingsBuilder().cancelJobSettings(); + } + + /** Returns the builder for the settings used for calls to deleteJob. */ + public UnaryCallSettings.Builder deleteJobSettings() { + return getStubSettingsBuilder().deleteJobSettings(); + } + + @Override + public JobControllerSettings build() throws IOException { + return new JobControllerSettings(this); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClient.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClient.java new file mode 100644 index 000000000000..63feb23ba17b --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClient.java @@ -0,0 +1,1026 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.dataproc.v1beta2; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.stub.WorkflowTemplateServiceStub; +import com.google.cloud.dataproc.v1beta2.stub.WorkflowTemplateServiceStubSettings; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsClient; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND SERVICE +/** + * Service Description: The API interface for managing Workflow Templates in the Cloud Dataproc API. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * 
+ * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+ *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+ *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+ *   WorkflowTemplate response = workflowTemplateServiceClient.createWorkflowTemplate(parent, template);
+ * }
+ * 
+ * 
+ * + *

Note: close() needs to be called on the workflowTemplateServiceClient object to clean up + * resources such as threads. In the example above, try-with-resources is used, which automatically + * calls close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *
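+ * For example, a sketch of the same getWorkflowTemplate call in each of the three styles, using
+ * the placeholder resource names from the samples below:
+ *
+ * try (WorkflowTemplateServiceClient client = WorkflowTemplateServiceClient.create()) {
+ *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+ *   // 1. Flattened: fields are passed directly as parameters.
+ *   WorkflowTemplate viaFlattened = client.getWorkflowTemplate(name);
+ *   // 2. Request object: the request is constructed explicitly.
+ *   GetWorkflowTemplateRequest request =
+ *       GetWorkflowTemplateRequest.newBuilder().setName(name.toString()).build();
+ *   WorkflowTemplate viaRequest = client.getWorkflowTemplate(request);
+ *   // 3. Callable: an immutable callable object, usable synchronously or via futureCall.
+ *   WorkflowTemplate viaCallable = client.getWorkflowTemplateCallable().call(request);
+ * }
+ *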

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of + * WorkflowTemplateServiceSettings to create(). For example: + * + *

To customize credentials: + * + *

+ * 
+ * WorkflowTemplateServiceSettings workflowTemplateServiceSettings =
+ *     WorkflowTemplateServiceSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * WorkflowTemplateServiceClient workflowTemplateServiceClient =
+ *     WorkflowTemplateServiceClient.create(workflowTemplateServiceSettings);
+ * 
+ * 
+ * + * To customize the endpoint: + * + *
+ * 
+ * WorkflowTemplateServiceSettings workflowTemplateServiceSettings =
+ *     WorkflowTemplateServiceSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * WorkflowTemplateServiceClient workflowTemplateServiceClient =
+ *     WorkflowTemplateServiceClient.create(workflowTemplateServiceSettings);
+ * 
+ * 
+ */
+@Generated("by gapic-generator")
+@BetaApi
+public class WorkflowTemplateServiceClient implements BackgroundResource {
+  private final WorkflowTemplateServiceSettings settings;
+  private final WorkflowTemplateServiceStub stub;
+  private final OperationsClient operationsClient;
+
+  /** Constructs an instance of WorkflowTemplateServiceClient with default settings. */
+  public static final WorkflowTemplateServiceClient create() throws IOException {
+    return create(WorkflowTemplateServiceSettings.newBuilder().build());
+  }
+
+  /**
+   * Constructs an instance of WorkflowTemplateServiceClient, using the given settings. The channels
+   * are created based on the settings passed in, or defaults for any settings that are not set.
+   */
+  public static final WorkflowTemplateServiceClient create(WorkflowTemplateServiceSettings settings)
+      throws IOException {
+    return new WorkflowTemplateServiceClient(settings);
+  }
+
+  /**
+   * Constructs an instance of WorkflowTemplateServiceClient, using the given stub for making calls.
+   * This is for advanced usage - prefer to use WorkflowTemplateServiceSettings.
+   */
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public static final WorkflowTemplateServiceClient create(WorkflowTemplateServiceStub stub) {
+    return new WorkflowTemplateServiceClient(stub);
+  }
+
+  /**
+   * Constructs an instance of WorkflowTemplateServiceClient, using the given settings. This is
+   * protected so that it is easy to make a subclass, but otherwise, the static factory methods
+   * should be preferred.
+   */
+  protected WorkflowTemplateServiceClient(WorkflowTemplateServiceSettings settings)
+      throws IOException {
+    this.settings = settings;
+    this.stub = ((WorkflowTemplateServiceStubSettings) settings.getStubSettings()).createStub();
+    this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  protected WorkflowTemplateServiceClient(WorkflowTemplateServiceStub stub) {
+    this.settings = null;
+    this.stub = stub;
+    this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
+  }
+
+  public final WorkflowTemplateServiceSettings getSettings() {
+    return settings;
+  }
+
+  @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+  public WorkflowTemplateServiceStub getStub() {
+    return stub;
+  }
+
+  /**
+   * Returns the OperationsClient that can be used to query the status of a long-running operation
+   * returned by another API method call.
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationsClient getOperationsClient() {
+    return operationsClient;
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Creates new workflow template.
+   *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+   *   WorkflowTemplate response = workflowTemplateServiceClient.createWorkflowTemplate(parent, template);
+   * }
+   * 
+ * + * @param parent Required. The "resource name" of the region, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}` + * @param template Required. The Dataproc workflow template to create. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WorkflowTemplate createWorkflowTemplate( + RegionName parent, WorkflowTemplate template) { + + CreateWorkflowTemplateRequest request = + CreateWorkflowTemplateRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setTemplate(template) + .build(); + return createWorkflowTemplate(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates new workflow template. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+   *   WorkflowTemplate response = workflowTemplateServiceClient.createWorkflowTemplate(parent.toString(), template);
+   * }
+   * 
+ * + * @param parent Required. The "resource name" of the region, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}` + * @param template Required. The Dataproc workflow template to create. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WorkflowTemplate createWorkflowTemplate(String parent, WorkflowTemplate template) { + + CreateWorkflowTemplateRequest request = + CreateWorkflowTemplateRequest.newBuilder().setParent(parent).setTemplate(template).build(); + return createWorkflowTemplate(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates new workflow template. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+   *   CreateWorkflowTemplateRequest request = CreateWorkflowTemplateRequest.newBuilder()
+   *     .setParent(parent.toString())
+   *     .setTemplate(template)
+   *     .build();
+   *   WorkflowTemplate response = workflowTemplateServiceClient.createWorkflowTemplate(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WorkflowTemplate createWorkflowTemplate(CreateWorkflowTemplateRequest request) { + return createWorkflowTemplateCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates new workflow template. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+   *   CreateWorkflowTemplateRequest request = CreateWorkflowTemplateRequest.newBuilder()
+   *     .setParent(parent.toString())
+   *     .setTemplate(template)
+   *     .build();
+   *   ApiFuture<WorkflowTemplate> future = workflowTemplateServiceClient.createWorkflowTemplateCallable().futureCall(request);
+   *   // Do something
+   *   WorkflowTemplate response = future.get();
+   * }
+   * 
+   */
+  public final UnaryCallable<CreateWorkflowTemplateRequest, WorkflowTemplate>
+      createWorkflowTemplateCallable() {
+    return stub.createWorkflowTemplateCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Retrieves the latest workflow template.
+   *

Can retrieve a previously instantiated template by specifying the optional version parameter. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   WorkflowTemplate response = workflowTemplateServiceClient.getWorkflowTemplate(name);
+   * }
+   * 
+ * + * @param name Required. The "resource name" of the workflow template, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WorkflowTemplate getWorkflowTemplate(WorkflowTemplateName name) { + + GetWorkflowTemplateRequest request = + GetWorkflowTemplateRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getWorkflowTemplate(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Retrieves the latest workflow template. + * + *

Can retrieve a previously instantiated template by specifying the optional version parameter. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   WorkflowTemplate response = workflowTemplateServiceClient.getWorkflowTemplate(name.toString());
+   * }
+   * 
+ * + * @param name Required. The "resource name" of the workflow template, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WorkflowTemplate getWorkflowTemplate(String name) { + + GetWorkflowTemplateRequest request = + GetWorkflowTemplateRequest.newBuilder().setName(name).build(); + return getWorkflowTemplate(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Retrieves the latest workflow template. + * + *

Can retrieve a previously instantiated template by specifying the optional version parameter. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   GetWorkflowTemplateRequest request = GetWorkflowTemplateRequest.newBuilder()
+   *     .setName(name.toString())
+   *     .build();
+   *   WorkflowTemplate response = workflowTemplateServiceClient.getWorkflowTemplate(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WorkflowTemplate getWorkflowTemplate(GetWorkflowTemplateRequest request) { + return getWorkflowTemplateCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Retrieves the latest workflow template. + * + *

Can retrieve a previously instantiated template by specifying the optional version parameter. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   GetWorkflowTemplateRequest request = GetWorkflowTemplateRequest.newBuilder()
+   *     .setName(name.toString())
+   *     .build();
+   *   ApiFuture<WorkflowTemplate> future = workflowTemplateServiceClient.getWorkflowTemplateCallable().futureCall(request);
+   *   // Do something
+   *   WorkflowTemplate response = future.get();
+   * }
+   * 
+   */
+  public final UnaryCallable<GetWorkflowTemplateRequest, WorkflowTemplate>
+      getWorkflowTemplateCallable() {
+    return stub.getWorkflowTemplateCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Instantiates a template and begins execution.
+   *

The returned Operation can be used to track execution of the workflow by polling + * [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when + * the entire workflow is finished. + * + *

The running workflow can be aborted via + * [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any + * inflight jobs to be cancelled and workflow-owned clusters to be deleted. + * + *

The [Operation.metadata][google.longrunning.Operation.metadata] will be + * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata]. + * + *

On successful completion, [Operation.response][google.longrunning.Operation.response] will + * be [Empty][google.protobuf.Empty]. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   String instanceId = "";
+   *   Empty response = workflowTemplateServiceClient.instantiateWorkflowTemplateAsync(name, instanceId).get();
+   * }
+   * 
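+   *
+   * A sketch of watching progress while the returned future runs (assuming the names above;
+   * peekMetadata() is the gax OperationFuture accessor for the most recently received
+   * WorkflowMetadata, and may not have a value early in execution):
+   *
+   *   OperationFuture<Empty, WorkflowMetadata> future =
+   *       workflowTemplateServiceClient.instantiateWorkflowTemplateAsync(name, instanceId);
+   *   WorkflowMetadata metadata = future.peekMetadata().get();  // latest server-reported state
+   *   Empty response = future.get();  // blocks until the workflow finishes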
+ * + * @param name Required. The "resource name" of the workflow template, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * @param instanceId Optional. A tag that prevents multiple concurrent workflow instances with the + * same tag from running. This mitigates risk of concurrent instances started due to retries. + *

It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + *
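+   *     (For example, a sketch: String instanceId = java.util.UUID.randomUUID().toString();)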

The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and
+   *     hyphens (-). The maximum length is 40 characters.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Empty, WorkflowMetadata> instantiateWorkflowTemplateAsync(
+      WorkflowTemplateName name, String instanceId) {
+
+    InstantiateWorkflowTemplateRequest request =
+        InstantiateWorkflowTemplateRequest.newBuilder()
+            .setName(name == null ? null : name.toString())
+            .setInstanceId(instanceId)
+            .build();
+    return instantiateWorkflowTemplateAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Instantiates a template and begins execution.
+   *

The returned Operation can be used to track execution of the workflow by polling + * [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when + * the entire workflow is finished. + * + *

The running workflow can be aborted via + * [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any + * inflight jobs to be cancelled and workflow-owned clusters to be deleted. + * + *

The [Operation.metadata][google.longrunning.Operation.metadata] will be + * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata]. + * + *

On successful completion, [Operation.response][google.longrunning.Operation.response] will + * be [Empty][google.protobuf.Empty]. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   String instanceId = "";
+   *   Empty response = workflowTemplateServiceClient.instantiateWorkflowTemplateAsync(name.toString(), instanceId).get();
+   * }
+   * 
+ * + * @param name Required. The "resource name" of the workflow template, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * @param instanceId Optional. A tag that prevents multiple concurrent workflow instances with the + * same tag from running. This mitigates risk of concurrent instances started due to retries. + *

It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + *

The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and
+   *     hyphens (-). The maximum length is 40 characters.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Empty, WorkflowMetadata> instantiateWorkflowTemplateAsync(
+      String name, String instanceId) {
+
+    InstantiateWorkflowTemplateRequest request =
+        InstantiateWorkflowTemplateRequest.newBuilder()
+            .setName(name)
+            .setInstanceId(instanceId)
+            .build();
+    return instantiateWorkflowTemplateAsync(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Instantiates a template and begins execution.
+   *

The returned Operation can be used to track execution of the workflow by polling + * [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when + * the entire workflow is finished. + * + *

The running workflow can be aborted via + * [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any + * inflight jobs to be cancelled and workflow-owned clusters to be deleted. + * + *

The [Operation.metadata][google.longrunning.Operation.metadata] will be + * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata]. + * + *

On successful completion, [Operation.response][google.longrunning.Operation.response] will + * be [Empty][google.protobuf.Empty]. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   InstantiateWorkflowTemplateRequest request = InstantiateWorkflowTemplateRequest.newBuilder()
+   *     .setName(name.toString())
+   *     .build();
+   *   Empty response = workflowTemplateServiceClient.instantiateWorkflowTemplateAsync(request).get();
+   * }
+   * 
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  @BetaApi(
+      "The surface for long-running operations is not stable yet and may change in the future.")
+  public final OperationFuture<Empty, WorkflowMetadata> instantiateWorkflowTemplateAsync(
+      InstantiateWorkflowTemplateRequest request) {
+    return instantiateWorkflowTemplateOperationCallable().futureCall(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Instantiates a template and begins execution.
+   *

The returned Operation can be used to track execution of the workflow by polling + * [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when + * the entire workflow is finished. + * + *

The running workflow can be aborted via + * [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any + * inflight jobs to be cancelled and workflow-owned clusters to be deleted. + * + *

The [Operation.metadata][google.longrunning.Operation.metadata] will be + * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata]. + * + *

On successful completion, [Operation.response][google.longrunning.Operation.response] will + * be [Empty][google.protobuf.Empty]. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   InstantiateWorkflowTemplateRequest request = InstantiateWorkflowTemplateRequest.newBuilder()
+   *     .setName(name.toString())
+   *     .build();
+   *   OperationFuture<Empty, WorkflowMetadata> future = workflowTemplateServiceClient.instantiateWorkflowTemplateOperationCallable().futureCall(request);
+   *   // Do something
+   *   Empty response = future.get();
+   * }
+   * 
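+   *
+   * (A note on the two callable surfaces: this operation callable resolves to the workflow's
+   * final Empty response plus typed WorkflowMetadata, while instantiateWorkflowTemplateCallable
+   * below returns the raw google.longrunning.Operation for manual polling.)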
+   */
+  @BetaApi("The surface for use by generated code is not stable yet and may change in the future.")
+  public final OperationCallable<InstantiateWorkflowTemplateRequest, Empty, WorkflowMetadata>
+      instantiateWorkflowTemplateOperationCallable() {
+    return stub.instantiateWorkflowTemplateOperationCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Instantiates a template and begins execution.
+   *

The returned Operation can be used to track execution of the workflow by polling + * [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when + * the entire workflow is finished. + * + *

The running workflow can be aborted via + * [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any + * inflight jobs to be cancelled and workflow-owned clusters to be deleted. + * + *

The [Operation.metadata][google.longrunning.Operation.metadata] will be + * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata]. + * + *

On successful completion, [Operation.response][google.longrunning.Operation.response] will + * be [Empty][google.protobuf.Empty]. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   InstantiateWorkflowTemplateRequest request = InstantiateWorkflowTemplateRequest.newBuilder()
+   *     .setName(name.toString())
+   *     .build();
+   *   ApiFuture<Operation> future = workflowTemplateServiceClient.instantiateWorkflowTemplateCallable().futureCall(request);
+   *   // Do something
+   *   future.get();
+   * }
+   * 
+   */
+  public final UnaryCallable<InstantiateWorkflowTemplateRequest, Operation>
+      instantiateWorkflowTemplateCallable() {
+    return stub.instantiateWorkflowTemplateCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates (replaces) a workflow template. The updated template must contain a version that
+   * matches the current server version.
+   *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+   *   WorkflowTemplate response = workflowTemplateServiceClient.updateWorkflowTemplate(template);
+   * }
+   * 
+ * + * @param template Required. The updated workflow template. + *

The `template.version` field must match the current version.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final WorkflowTemplate updateWorkflowTemplate(WorkflowTemplate template) {
+
+    UpdateWorkflowTemplateRequest request =
+        UpdateWorkflowTemplateRequest.newBuilder().setTemplate(template).build();
+    return updateWorkflowTemplate(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates (replaces) a workflow template. The updated template must contain a version that
+   * matches the current server version.
+   *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+   *   UpdateWorkflowTemplateRequest request = UpdateWorkflowTemplateRequest.newBuilder()
+   *     .setTemplate(template)
+   *     .build();
+   *   WorkflowTemplate response = workflowTemplateServiceClient.updateWorkflowTemplate(request);
+   * }
+   * 
+   *
+   * @param request The request object containing all of the parameters for the API call.
+   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+   */
+  public final WorkflowTemplate updateWorkflowTemplate(UpdateWorkflowTemplateRequest request) {
+    return updateWorkflowTemplateCallable().call(request);
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Updates (replaces) a workflow template. The updated template must contain a version that
+   * matches the current server version.
+   *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+   *   UpdateWorkflowTemplateRequest request = UpdateWorkflowTemplateRequest.newBuilder()
+   *     .setTemplate(template)
+   *     .build();
+   *   ApiFuture<WorkflowTemplate> future = workflowTemplateServiceClient.updateWorkflowTemplateCallable().futureCall(request);
+   *   // Do something
+   *   WorkflowTemplate response = future.get();
+   * }
+   * 
+   */
+  public final UnaryCallable<UpdateWorkflowTemplateRequest, WorkflowTemplate>
+      updateWorkflowTemplateCallable() {
+    return stub.updateWorkflowTemplateCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists workflows that match the specified filter in the request.
+   *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   for (WorkflowTemplate element : workflowTemplateServiceClient.listWorkflowTemplates(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * 
+ * + * @param parent Required. The "resource name" of the region, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListWorkflowTemplatesPagedResponse listWorkflowTemplates(RegionName parent) { + ListWorkflowTemplatesRequest request = + ListWorkflowTemplatesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listWorkflowTemplates(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Lists workflows that match the specified filter in the request. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   for (WorkflowTemplate element : workflowTemplateServiceClient.listWorkflowTemplates(parent.toString()).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * 
+ * + * @param parent Required. The "resource name" of the region, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListWorkflowTemplatesPagedResponse listWorkflowTemplates(String parent) { + ListWorkflowTemplatesRequest request = + ListWorkflowTemplatesRequest.newBuilder().setParent(parent).build(); + return listWorkflowTemplates(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Lists workflows that match the specified filter in the request. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   ListWorkflowTemplatesRequest request = ListWorkflowTemplatesRequest.newBuilder()
+   *     .setParent(parent.toString())
+   *     .build();
+   *   for (WorkflowTemplate element : workflowTemplateServiceClient.listWorkflowTemplates(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListWorkflowTemplatesPagedResponse listWorkflowTemplates( + ListWorkflowTemplatesRequest request) { + return listWorkflowTemplatesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Lists workflows that match the specified filter in the request. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   ListWorkflowTemplatesRequest request = ListWorkflowTemplatesRequest.newBuilder()
+   *     .setParent(parent.toString())
+   *     .build();
+   *   ApiFuture<ListWorkflowTemplatesPagedResponse> future = workflowTemplateServiceClient.listWorkflowTemplatesPagedCallable().futureCall(request);
+   *   // Do something
+   *   for (WorkflowTemplate element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * 
+   *
+   */
+  public final UnaryCallable<ListWorkflowTemplatesRequest, ListWorkflowTemplatesPagedResponse>
+      listWorkflowTemplatesPagedCallable() {
+    return stub.listWorkflowTemplatesPagedCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Lists workflows that match the specified filter in the request.
+   *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+   *   ListWorkflowTemplatesRequest request = ListWorkflowTemplatesRequest.newBuilder()
+   *     .setParent(parent.toString())
+   *     .build();
+   *   while (true) {
+   *     ListWorkflowTemplatesResponse response = workflowTemplateServiceClient.listWorkflowTemplatesCallable().call(request);
+   *     for (WorkflowTemplate element : response.getTemplatesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * 
+   */
+  public final UnaryCallable<ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse>
+      listWorkflowTemplatesCallable() {
+    return stub.listWorkflowTemplatesCallable();
+  }
+
+  // AUTO-GENERATED DOCUMENTATION AND METHOD
+  /**
+   * Deletes a workflow template. It does not cancel in-progress workflows.
+   *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   workflowTemplateServiceClient.deleteWorkflowTemplate(name);
+   * }
+   * 
+ * + * @param name Required. The "resource name" of the workflow template, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteWorkflowTemplate(WorkflowTemplateName name) { + + DeleteWorkflowTemplateRequest request = + DeleteWorkflowTemplateRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteWorkflowTemplate(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Deletes a workflow template. It does not cancel in-progress workflows. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   workflowTemplateServiceClient.deleteWorkflowTemplate(name.toString());
+   * }
+   * 
+ * + * @param name Required. The "resource name" of the workflow template, as described in + * https://cloud.google.com/apis/design/resource_names of the form + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteWorkflowTemplate(String name) { + + DeleteWorkflowTemplateRequest request = + DeleteWorkflowTemplateRequest.newBuilder().setName(name).build(); + deleteWorkflowTemplate(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Deletes a workflow template. It does not cancel in-progress workflows. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   DeleteWorkflowTemplateRequest request = DeleteWorkflowTemplateRequest.newBuilder()
+   *     .setName(name.toString())
+   *     .build();
+   *   workflowTemplateServiceClient.deleteWorkflowTemplate(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteWorkflowTemplate(DeleteWorkflowTemplateRequest request) { + deleteWorkflowTemplateCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Deletes a workflow template. It does not cancel in-progress workflows. + * + *

Sample code: + * + *


+   * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+   *   WorkflowTemplateName name = WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+   *   DeleteWorkflowTemplateRequest request = DeleteWorkflowTemplateRequest.newBuilder()
+   *     .setName(name.toString())
+   *     .build();
+   *   ApiFuture<Void> future = workflowTemplateServiceClient.deleteWorkflowTemplateCallable().futureCall(request);
+   *   // Do something
+   *   future.get();
+   * }
+   * 
+ */ + public final UnaryCallable + deleteWorkflowTemplateCallable() { + return stub.deleteWorkflowTemplateCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListWorkflowTemplatesPagedResponse + extends AbstractPagedListResponse< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, WorkflowTemplate, + ListWorkflowTemplatesPage, ListWorkflowTemplatesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListWorkflowTemplatesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + new ApiFunction() { + @Override + public ListWorkflowTemplatesPagedResponse apply(ListWorkflowTemplatesPage input) { + return new ListWorkflowTemplatesPagedResponse(input); + } + }); + } + + private ListWorkflowTemplatesPagedResponse(ListWorkflowTemplatesPage page) { + super(page, ListWorkflowTemplatesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListWorkflowTemplatesPage + extends AbstractPage< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, WorkflowTemplate, + ListWorkflowTemplatesPage> { + + private ListWorkflowTemplatesPage( + PageContext + context, + ListWorkflowTemplatesResponse response) { + super(context, response); + } + + private static ListWorkflowTemplatesPage createEmptyPage() { + return new ListWorkflowTemplatesPage(null, null); + } + + @Override + protected ListWorkflowTemplatesPage createPage( + PageContext + context, + ListWorkflowTemplatesResponse response) { + return new ListWorkflowTemplatesPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListWorkflowTemplatesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, WorkflowTemplate, + ListWorkflowTemplatesPage, ListWorkflowTemplatesFixedSizeCollection> { + + private ListWorkflowTemplatesFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListWorkflowTemplatesFixedSizeCollection createEmptyCollection() { + return new ListWorkflowTemplatesFixedSizeCollection(null, 0); + } + + @Override + protected ListWorkflowTemplatesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListWorkflowTemplatesFixedSizeCollection(pages, collectionSize); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceSettings.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceSettings.java new file mode 100644 index 000000000000..62611fdf75cc --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceSettings.java @@ -0,0 +1,270 
@@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2; + +import static com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceClient.ListWorkflowTemplatesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.dataproc.v1beta2.stub.WorkflowTemplateServiceStubSettings; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link WorkflowTemplateServiceClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (dataproc.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. For + * example, to set the total timeout of createWorkflowTemplate to 30 seconds: + * + *

+ * 
+ * WorkflowTemplateServiceSettings.Builder workflowTemplateServiceSettingsBuilder =
+ *     WorkflowTemplateServiceSettings.newBuilder();
+ * workflowTemplateServiceSettingsBuilder.createWorkflowTemplateSettings().setRetrySettings(
+ *     workflowTemplateServiceSettingsBuilder.createWorkflowTemplateSettings().getRetrySettings()
+ *         .toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * WorkflowTemplateServiceSettings workflowTemplateServiceSettings = workflowTemplateServiceSettingsBuilder.build();
+ * 
+ * 
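+ *
+ * Note that RetrySettings is immutable: toBuilder() produces a copy, so the rebuilt value only
+ * takes effect when it is passed back through setRetrySettings(...), as in the sample above.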
+ */ +@Generated("by gapic-generator") +@BetaApi +public class WorkflowTemplateServiceSettings + extends ClientSettings { + /** Returns the object with the settings used for calls to createWorkflowTemplate. */ + public UnaryCallSettings + createWorkflowTemplateSettings() { + return ((WorkflowTemplateServiceStubSettings) getStubSettings()) + .createWorkflowTemplateSettings(); + } + + /** Returns the object with the settings used for calls to getWorkflowTemplate. */ + public UnaryCallSettings + getWorkflowTemplateSettings() { + return ((WorkflowTemplateServiceStubSettings) getStubSettings()).getWorkflowTemplateSettings(); + } + + /** Returns the object with the settings used for calls to instantiateWorkflowTemplate. */ + public UnaryCallSettings + instantiateWorkflowTemplateSettings() { + return ((WorkflowTemplateServiceStubSettings) getStubSettings()) + .instantiateWorkflowTemplateSettings(); + } + + /** Returns the object with the settings used for calls to instantiateWorkflowTemplate. */ + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + public OperationCallSettings + instantiateWorkflowTemplateOperationSettings() { + return ((WorkflowTemplateServiceStubSettings) getStubSettings()) + .instantiateWorkflowTemplateOperationSettings(); + } + + /** Returns the object with the settings used for calls to updateWorkflowTemplate. */ + public UnaryCallSettings + updateWorkflowTemplateSettings() { + return ((WorkflowTemplateServiceStubSettings) getStubSettings()) + .updateWorkflowTemplateSettings(); + } + + /** Returns the object with the settings used for calls to listWorkflowTemplates. */ + public PagedCallSettings< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse> + listWorkflowTemplatesSettings() { + return ((WorkflowTemplateServiceStubSettings) getStubSettings()) + .listWorkflowTemplatesSettings(); + } + + /** Returns the object with the settings used for calls to deleteWorkflowTemplate. */ + public UnaryCallSettings deleteWorkflowTemplateSettings() { + return ((WorkflowTemplateServiceStubSettings) getStubSettings()) + .deleteWorkflowTemplateSettings(); + } + + public static final WorkflowTemplateServiceSettings create( + WorkflowTemplateServiceStubSettings stub) throws IOException { + return new WorkflowTemplateServiceSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return WorkflowTemplateServiceStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return WorkflowTemplateServiceStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return WorkflowTemplateServiceStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return WorkflowTemplateServiceStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return WorkflowTemplateServiceStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return WorkflowTemplateServiceStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return WorkflowTemplateServiceStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected WorkflowTemplateServiceSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for WorkflowTemplateServiceSettings. */ + public static class Builder + extends ClientSettings.Builder { + protected Builder() throws IOException { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(WorkflowTemplateServiceStubSettings.newBuilder(clientContext)); + } + + private static Builder createDefault() { + return new Builder(WorkflowTemplateServiceStubSettings.newBuilder()); + } + + protected Builder(WorkflowTemplateServiceSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(WorkflowTemplateServiceStubSettings.Builder stubSettings) { + super(stubSettings); + } + + public WorkflowTemplateServiceStubSettings.Builder getStubSettingsBuilder() { + return ((WorkflowTemplateServiceStubSettings.Builder) getStubSettings()); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createWorkflowTemplate. */ + public UnaryCallSettings.Builder + createWorkflowTemplateSettings() { + return getStubSettingsBuilder().createWorkflowTemplateSettings(); + } + + /** Returns the builder for the settings used for calls to getWorkflowTemplate. */ + public UnaryCallSettings.Builder + getWorkflowTemplateSettings() { + return getStubSettingsBuilder().getWorkflowTemplateSettings(); + } + + /** Returns the builder for the settings used for calls to instantiateWorkflowTemplate. */ + public UnaryCallSettings.Builder + instantiateWorkflowTemplateSettings() { + return getStubSettingsBuilder().instantiateWorkflowTemplateSettings(); + } + + /** Returns the builder for the settings used for calls to instantiateWorkflowTemplate. */ + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + public OperationCallSettings.Builder< + InstantiateWorkflowTemplateRequest, Empty, WorkflowMetadata> + instantiateWorkflowTemplateOperationSettings() { + return getStubSettingsBuilder().instantiateWorkflowTemplateOperationSettings(); + } + + /** Returns the builder for the settings used for calls to updateWorkflowTemplate. */ + public UnaryCallSettings.Builder + updateWorkflowTemplateSettings() { + return getStubSettingsBuilder().updateWorkflowTemplateSettings(); + } + + /** Returns the builder for the settings used for calls to listWorkflowTemplates. */ + public PagedCallSettings.Builder< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse> + listWorkflowTemplatesSettings() { + return getStubSettingsBuilder().listWorkflowTemplatesSettings(); + } + + /** Returns the builder for the settings used for calls to deleteWorkflowTemplate. */ + public UnaryCallSettings.Builder + deleteWorkflowTemplateSettings() { + return getStubSettingsBuilder().deleteWorkflowTemplateSettings(); + } + + @Override + public WorkflowTemplateServiceSettings build() throws IOException { + return new WorkflowTemplateServiceSettings(this); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/package-info.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/package-info.java new file mode 100644 index 000000000000..b648f64c96e4 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/package-info.java @@ -0,0 +1,74 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Google Cloud Dataproc API. + * + *

+ * The interfaces provided are listed below, along with usage samples.
+ *
+ * ======================= ClusterControllerClient =======================
+ *
+ * Service Description: The ClusterControllerService provides methods to manage clusters of
+ * Compute Engine instances.
+ *
+ * Sample for ClusterControllerClient:
+ *

+ * 
+ * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
+ *   String projectId = "";
+ *   String region = "";
+ *   String clusterName = "";
+ *   Cluster response = clusterControllerClient.getCluster(projectId, region, clusterName);
+ * }
+ * 
+ * 
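+ *
+ * A minimal follow-up sketch (editor's illustration, not generated output; it assumes that the
+ * ClusterControllerSettings class generated in this patch mirrors the settings surface shown
+ * above for WorkflowTemplateServiceSettings): the same client, created with a customized total
+ * timeout for getCluster:
+ *
+ * ClusterControllerSettings.Builder settingsBuilder = ClusterControllerSettings.newBuilder();
+ * settingsBuilder
+ *     .getClusterSettings()
+ *     .setRetrySettings(
+ *         settingsBuilder.getClusterSettings().getRetrySettings().toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * try (ClusterControllerClient client = ClusterControllerClient.create(settingsBuilder.build())) {
+ *   // "my-project", "global" and "my-cluster" are placeholder values
+ *   Cluster response = client.getCluster("my-project", "global", "my-cluster");
+ * }
+ *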
+ *
+ * =================== JobControllerClient ===================
+ *
+ * Service Description: The JobController provides methods to manage jobs.
+ *
+ * Sample for JobControllerClient:
+ *

+ * 
+ * try (JobControllerClient jobControllerClient = JobControllerClient.create()) {
+ *   String projectId = "";
+ *   String region = "";
+ *   Job job = Job.newBuilder().build();
+ *   Job response = jobControllerClient.submitJob(projectId, region, job);
+ * }
+ * 
+ * 
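+ *
+ * A further sketch (editor's illustration, assuming JobControllerClient exposes
+ * submitJobCallable() just as the GrpcJobControllerStub in this patch does), reusing the
+ * projectId, region and job variables from the try block above to submit asynchronously:
+ *
+ * SubmitJobRequest request = SubmitJobRequest.newBuilder()
+ *     .setProjectId(projectId)
+ *     .setRegion(region)
+ *     .setJob(job)
+ *     .build();
+ * ApiFuture<Job> future = jobControllerClient.submitJobCallable().futureCall(request);
+ * Job submitted = future.get(); // blocks until the service returns the job resource
+ *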
+ *
+ * ============================= WorkflowTemplateServiceClient =============================
+ *
+ * Service Description: The API interface for managing Workflow Templates in the Cloud Dataproc
+ * API.
+ *
+ * Sample for WorkflowTemplateServiceClient:
+ *

+ * 
+ * try (WorkflowTemplateServiceClient workflowTemplateServiceClient = WorkflowTemplateServiceClient.create()) {
+ *   RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+ *   WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+ *   WorkflowTemplate response = workflowTemplateServiceClient.createWorkflowTemplate(parent, template);
+ * }
+ * 
+ * 
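+ *
+ * A further sketch (editor's illustration; the instantiateWorkflowTemplateAsync method is an
+ * assumption inferred from the OperationCallSettings<InstantiateWorkflowTemplateRequest, Empty,
+ * WorkflowMetadata> surface generated in this patch), waiting on the long-running instantiate
+ * operation inside the try block above:
+ *
+ * InstantiateWorkflowTemplateRequest request = InstantiateWorkflowTemplateRequest.newBuilder()
+ *     .setName(WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]").toString())
+ *     .build();
+ * OperationFuture<Empty, WorkflowMetadata> future =
+ *     workflowTemplateServiceClient.instantiateWorkflowTemplateAsync(request);
+ * future.get(); // blocks until the instantiated workflow completes
+ *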
+ */ +package com.google.cloud.dataproc.v1beta2; diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStub.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStub.java new file mode 100644 index 000000000000..075bb9dec63c --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStub.java @@ -0,0 +1,108 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.ClusterControllerClient.ListClustersPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.Cluster; +import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata; +import com.google.cloud.dataproc.v1beta2.CreateClusterRequest; +import com.google.cloud.dataproc.v1beta2.DeleteClusterRequest; +import com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest; +import com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults; +import com.google.cloud.dataproc.v1beta2.GetClusterRequest; +import com.google.cloud.dataproc.v1beta2.ListClustersRequest; +import com.google.cloud.dataproc.v1beta2.ListClustersResponse; +import com.google.cloud.dataproc.v1beta2.UpdateClusterRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Base stub class for Google Cloud Dataproc API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public abstract class ClusterControllerStub implements BackgroundResource { + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationsStub getOperationsStub() { + throw new UnsupportedOperationException("Not implemented: getOperationsStub()"); + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + createClusterOperationCallable() { + throw new UnsupportedOperationException("Not implemented: createClusterOperationCallable()"); + } + + public UnaryCallable createClusterCallable() { + throw new UnsupportedOperationException("Not implemented: createClusterCallable()"); + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + updateClusterOperationCallable() { + throw new UnsupportedOperationException("Not implemented: updateClusterOperationCallable()"); + } + + public UnaryCallable updateClusterCallable() { + throw new UnsupportedOperationException("Not implemented: updateClusterCallable()"); + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + deleteClusterOperationCallable() { + throw new UnsupportedOperationException("Not implemented: deleteClusterOperationCallable()"); + } + + public UnaryCallable deleteClusterCallable() { + throw new UnsupportedOperationException("Not implemented: deleteClusterCallable()"); + } + + public UnaryCallable getClusterCallable() { + throw new UnsupportedOperationException("Not implemented: getClusterCallable()"); + } + + public UnaryCallable listClustersPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listClustersPagedCallable()"); + } + + public UnaryCallable listClustersCallable() { + throw new UnsupportedOperationException("Not implemented: listClustersCallable()"); + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + diagnoseClusterOperationCallable() { + throw new UnsupportedOperationException("Not implemented: diagnoseClusterOperationCallable()"); + } + + public UnaryCallable diagnoseClusterCallable() { + throw new UnsupportedOperationException("Not implemented: diagnoseClusterCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStubSettings.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStubSettings.java new file mode 100644 index 000000000000..691e2e00f732 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/ClusterControllerStubSettings.java @@ -0,0 +1,651 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.ClusterControllerClient.ListClustersPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.Cluster; +import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata; +import com.google.cloud.dataproc.v1beta2.CreateClusterRequest; +import com.google.cloud.dataproc.v1beta2.DeleteClusterRequest; +import com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest; +import com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults; +import com.google.cloud.dataproc.v1beta2.GetClusterRequest; +import com.google.cloud.dataproc.v1beta2.ListClustersRequest; +import com.google.cloud.dataproc.v1beta2.ListClustersResponse; +import com.google.cloud.dataproc.v1beta2.UpdateClusterRequest; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link ClusterControllerStub}. + * + *

+ * The default instance has everything set to sensible defaults:
+ *
+ *   • The default service address (dataproc.googleapis.com) and default port (443) are used.
+ *   • Credentials are acquired automatically through Application Default Credentials.
+ *   • Retries are configured for idempotent methods but not for non-idempotent methods.
+ *
+ * The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of getCluster to 30 seconds:
+ *

+ * 
+ * ClusterControllerStubSettings.Builder clusterControllerSettingsBuilder =
+ *     ClusterControllerStubSettings.newBuilder();
+ * clusterControllerSettingsBuilder
+ *     .getClusterSettings()
+ *     .setRetrySettings(
+ *         clusterControllerSettingsBuilder.getClusterSettings().getRetrySettings().toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * ClusterControllerStubSettings clusterControllerSettings = clusterControllerSettingsBuilder.build();
+ * 
+ * 
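+ *
+ * A minimal sketch (editor's illustration, built on the applyToAllUnaryMethods method this
+ * Builder defines below) applying one override to every unary method at once; each method's
+ * existing retry settings are read back, and only the total timeout is replaced:
+ *
+ * clusterControllerSettingsBuilder.applyToAllUnaryMethods(
+ *     new ApiFunction<UnaryCallSettings.Builder<?, ?>, Void>() {
+ *       public Void apply(UnaryCallSettings.Builder<?, ?> builder) {
+ *         builder.setRetrySettings(
+ *             builder.getRetrySettings().toBuilder()
+ *                 .setTotalTimeout(Duration.ofSeconds(30))
+ *                 .build());
+ *         return null;
+ *       }
+ *     });
+ *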
+ */ +@Generated("by gapic-generator") +@BetaApi +public class ClusterControllerStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder().add("https://www.googleapis.com/auth/cloud-platform").build(); + + private final UnaryCallSettings createClusterSettings; + private final OperationCallSettings + createClusterOperationSettings; + private final UnaryCallSettings updateClusterSettings; + private final OperationCallSettings + updateClusterOperationSettings; + private final UnaryCallSettings deleteClusterSettings; + private final OperationCallSettings + deleteClusterOperationSettings; + private final UnaryCallSettings getClusterSettings; + private final PagedCallSettings< + ListClustersRequest, ListClustersResponse, ListClustersPagedResponse> + listClustersSettings; + private final UnaryCallSettings diagnoseClusterSettings; + private final OperationCallSettings + diagnoseClusterOperationSettings; + + /** Returns the object with the settings used for calls to createCluster. */ + public UnaryCallSettings createClusterSettings() { + return createClusterSettings; + } + + /** Returns the object with the settings used for calls to createCluster. */ + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings + createClusterOperationSettings() { + return createClusterOperationSettings; + } + + /** Returns the object with the settings used for calls to updateCluster. */ + public UnaryCallSettings updateClusterSettings() { + return updateClusterSettings; + } + + /** Returns the object with the settings used for calls to updateCluster. */ + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings + updateClusterOperationSettings() { + return updateClusterOperationSettings; + } + + /** Returns the object with the settings used for calls to deleteCluster. */ + public UnaryCallSettings deleteClusterSettings() { + return deleteClusterSettings; + } + + /** Returns the object with the settings used for calls to deleteCluster. */ + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings + deleteClusterOperationSettings() { + return deleteClusterOperationSettings; + } + + /** Returns the object with the settings used for calls to getCluster. */ + public UnaryCallSettings getClusterSettings() { + return getClusterSettings; + } + + /** Returns the object with the settings used for calls to listClusters. */ + public PagedCallSettings + listClustersSettings() { + return listClustersSettings; + } + + /** Returns the object with the settings used for calls to diagnoseCluster. */ + public UnaryCallSettings diagnoseClusterSettings() { + return diagnoseClusterSettings; + } + + /** Returns the object with the settings used for calls to diagnoseCluster. 
*/ + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings + diagnoseClusterOperationSettings() { + return diagnoseClusterOperationSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public ClusterControllerStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcClusterControllerStub.create(this); + } else { + throw new UnsupportedOperationException( + "Transport not supported: " + getTransportChannelProvider().getTransportName()); + } + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "dataproc.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder().setScopesToApply(DEFAULT_SERVICE_SCOPES); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(ClusterControllerStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected ClusterControllerStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createClusterSettings = settingsBuilder.createClusterSettings().build(); + createClusterOperationSettings = settingsBuilder.createClusterOperationSettings().build(); + updateClusterSettings = settingsBuilder.updateClusterSettings().build(); + updateClusterOperationSettings = settingsBuilder.updateClusterOperationSettings().build(); + deleteClusterSettings = settingsBuilder.deleteClusterSettings().build(); + deleteClusterOperationSettings = settingsBuilder.deleteClusterOperationSettings().build(); + getClusterSettings = settingsBuilder.getClusterSettings().build(); + listClustersSettings = settingsBuilder.listClustersSettings().build(); + diagnoseClusterSettings = settingsBuilder.diagnoseClusterSettings().build(); + diagnoseClusterOperationSettings = settingsBuilder.diagnoseClusterOperationSettings().build(); + } + + private static final PagedListDescriptor + LIST_CLUSTERS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListClustersRequest injectToken(ListClustersRequest payload, String token) { + return ListClustersRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListClustersRequest injectPageSize(ListClustersRequest payload, int pageSize) { + return ListClustersRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListClustersRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListClustersResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListClustersResponse payload) { + return payload.getClustersList(); + } + }; + + private static final PagedListResponseFactory< + ListClustersRequest, ListClustersResponse, ListClustersPagedResponse> + LIST_CLUSTERS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListClustersRequest, ListClustersResponse, ListClustersPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListClustersRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_CLUSTERS_PAGE_STR_DESC, request, context); + return ListClustersPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Builder for ClusterControllerStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder createClusterSettings; + private final OperationCallSettings.Builder< + CreateClusterRequest, Cluster, ClusterOperationMetadata> + createClusterOperationSettings; + private final UnaryCallSettings.Builder updateClusterSettings; + private final OperationCallSettings.Builder< + UpdateClusterRequest, Cluster, ClusterOperationMetadata> + updateClusterOperationSettings; + private final UnaryCallSettings.Builder deleteClusterSettings; + private final OperationCallSettings.Builder< + DeleteClusterRequest, Empty, ClusterOperationMetadata> + deleteClusterOperationSettings; + private final UnaryCallSettings.Builder getClusterSettings; + private final PagedCallSettings.Builder< + ListClustersRequest, ListClustersResponse, ListClustersPagedResponse> + listClustersSettings; + private final UnaryCallSettings.Builder + diagnoseClusterSettings; + private final OperationCallSettings.Builder< + DiagnoseClusterRequest, Empty, DiagnoseClusterResults> + diagnoseClusterOperationSettings; + + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "idempotent", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put("non_idempotent", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(10000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(10000L)) + .setTotalTimeout(Duration.ofMillis(300000L)) + .build(); + definitions.put("default", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + createClusterOperationSettings = OperationCallSettings.newBuilder(); + + updateClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + updateClusterOperationSettings = OperationCallSettings.newBuilder(); + + deleteClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + deleteClusterOperationSettings = OperationCallSettings.newBuilder(); + + getClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + listClustersSettings = PagedCallSettings.newBuilder(LIST_CLUSTERS_PAGE_STR_FACT); + + diagnoseClusterSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + diagnoseClusterOperationSettings = OperationCallSettings.newBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createClusterSettings, + updateClusterSettings, + deleteClusterSettings, + getClusterSettings, + listClustersSettings, + diagnoseClusterSettings); + + initDefaults(this); + } + + private static Builder createDefault() { + Builder builder = new Builder((ClientContext) null); + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + 
builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + + builder + .createClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .updateClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .deleteClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .getClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .listClustersSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .diagnoseClusterSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + builder + .createClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Cluster.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(ClusterOperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMillis(10000L)) + .setInitialRpcTimeout(Duration.ZERO) // ignored + .setRpcTimeoutMultiplier(1.0) // ignored + .setMaxRpcTimeout(Duration.ZERO) // ignored + .setTotalTimeout(Duration.ofMillis(300000L)) + .build())); + builder + .updateClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Cluster.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(ClusterOperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMillis(10000L)) + .setInitialRpcTimeout(Duration.ZERO) // ignored + .setRpcTimeoutMultiplier(1.0) // ignored + .setMaxRpcTimeout(Duration.ZERO) // ignored + .setTotalTimeout(Duration.ofMillis(300000L)) + .build())); + builder + .deleteClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Empty.class)) + .setMetadataTransformer( + 
ProtoOperationTransformers.MetadataTransformer.create(ClusterOperationMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMillis(10000L)) + .setInitialRpcTimeout(Duration.ZERO) // ignored + .setRpcTimeoutMultiplier(1.0) // ignored + .setMaxRpcTimeout(Duration.ZERO) // ignored + .setTotalTimeout(Duration.ofMillis(300000L)) + .build())); + builder + .diagnoseClusterOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Empty.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(DiagnoseClusterResults.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMillis(10000L)) + .setInitialRpcTimeout(Duration.ZERO) // ignored + .setRpcTimeoutMultiplier(1.0) // ignored + .setMaxRpcTimeout(Duration.ZERO) // ignored + .setTotalTimeout(Duration.ofMillis(30000L)) + .build())); + + return builder; + } + + protected Builder(ClusterControllerStubSettings settings) { + super(settings); + + createClusterSettings = settings.createClusterSettings.toBuilder(); + createClusterOperationSettings = settings.createClusterOperationSettings.toBuilder(); + updateClusterSettings = settings.updateClusterSettings.toBuilder(); + updateClusterOperationSettings = settings.updateClusterOperationSettings.toBuilder(); + deleteClusterSettings = settings.deleteClusterSettings.toBuilder(); + deleteClusterOperationSettings = settings.deleteClusterOperationSettings.toBuilder(); + getClusterSettings = settings.getClusterSettings.toBuilder(); + listClustersSettings = settings.listClustersSettings.toBuilder(); + diagnoseClusterSettings = settings.diagnoseClusterSettings.toBuilder(); + diagnoseClusterOperationSettings = settings.diagnoseClusterOperationSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createClusterSettings, + updateClusterSettings, + deleteClusterSettings, + getClusterSettings, + listClustersSettings, + diagnoseClusterSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createCluster. */ + public UnaryCallSettings.Builder createClusterSettings() { + return createClusterSettings; + } + + /** Returns the builder for the settings used for calls to createCluster. */ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder + createClusterOperationSettings() { + return createClusterOperationSettings; + } + + /** Returns the builder for the settings used for calls to updateCluster. */ + public UnaryCallSettings.Builder updateClusterSettings() { + return updateClusterSettings; + } + + /** Returns the builder for the settings used for calls to updateCluster. */ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder + updateClusterOperationSettings() { + return updateClusterOperationSettings; + } + + /** Returns the builder for the settings used for calls to deleteCluster. */ + public UnaryCallSettings.Builder deleteClusterSettings() { + return deleteClusterSettings; + } + + /** Returns the builder for the settings used for calls to deleteCluster. */ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder + deleteClusterOperationSettings() { + return deleteClusterOperationSettings; + } + + /** Returns the builder for the settings used for calls to getCluster. */ + public UnaryCallSettings.Builder getClusterSettings() { + return getClusterSettings; + } + + /** Returns the builder for the settings used for calls to listClusters. */ + public PagedCallSettings.Builder< + ListClustersRequest, ListClustersResponse, ListClustersPagedResponse> + listClustersSettings() { + return listClustersSettings; + } + + /** Returns the builder for the settings used for calls to diagnoseCluster. */ + public UnaryCallSettings.Builder diagnoseClusterSettings() { + return diagnoseClusterSettings; + } + + /** Returns the builder for the settings used for calls to diagnoseCluster. 
*/ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder + diagnoseClusterOperationSettings() { + return diagnoseClusterOperationSettings; + } + + @Override + public ClusterControllerStubSettings build() throws IOException { + return new ClusterControllerStubSettings(this); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerCallableFactory.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerCallableFactory.java new file mode 100644 index 000000000000..c62cbb90e97c --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerCallableFactory.java @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC callable factory implementation for Google Cloud Dataproc API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator") +@BetaApi("The surface for use by generated code is not stable yet and may change in the future.") +public class GrpcClusterControllerCallableFactory implements GrpcStubCallableFactory { + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings pagedCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable( + grpcCallSettings, pagedCallSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings batchingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, batchingCallSettings, clientContext); + } + + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings operationCallSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, operationCallSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerStub.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerStub.java new file mode 100644 index 000000000000..0be21db42d9b --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcClusterControllerStub.java @@ -0,0 +1,332 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.ClusterControllerClient.ListClustersPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.Cluster; +import com.google.cloud.dataproc.v1beta2.ClusterOperationMetadata; +import com.google.cloud.dataproc.v1beta2.CreateClusterRequest; +import com.google.cloud.dataproc.v1beta2.DeleteClusterRequest; +import com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest; +import com.google.cloud.dataproc.v1beta2.DiagnoseClusterResults; +import com.google.cloud.dataproc.v1beta2.GetClusterRequest; +import com.google.cloud.dataproc.v1beta2.ListClustersRequest; +import com.google.cloud.dataproc.v1beta2.ListClustersResponse; +import com.google.cloud.dataproc.v1beta2.UpdateClusterRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC stub implementation for Google Cloud Dataproc API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public class GrpcClusterControllerStub extends ClusterControllerStub { + + private static final MethodDescriptor + createClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.ClusterController/CreateCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + private static final MethodDescriptor + updateClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + private static final MethodDescriptor + deleteClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + private static final MethodDescriptor getClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.ClusterController/GetCluster") + .setRequestMarshaller(ProtoUtils.marshaller(GetClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Cluster.getDefaultInstance())) + .build(); + private static final MethodDescriptor + listClustersMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.ClusterController/ListClusters") + .setRequestMarshaller(ProtoUtils.marshaller(ListClustersRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListClustersResponse.getDefaultInstance())) + .build(); + private static final MethodDescriptor + diagnoseClusterMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster") + .setRequestMarshaller( + ProtoUtils.marshaller(DiagnoseClusterRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + + private final UnaryCallable createClusterCallable; + private final OperationCallable + createClusterOperationCallable; + private final UnaryCallable updateClusterCallable; + private final OperationCallable + updateClusterOperationCallable; + private final UnaryCallable deleteClusterCallable; + private final OperationCallable + deleteClusterOperationCallable; + private final UnaryCallable getClusterCallable; + private final UnaryCallable listClustersCallable; + private final UnaryCallable + listClustersPagedCallable; + private final UnaryCallable diagnoseClusterCallable; + private final OperationCallable + 
diagnoseClusterOperationCallable; + + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcClusterControllerStub create(ClusterControllerStubSettings settings) + throws IOException { + return new GrpcClusterControllerStub(settings, ClientContext.create(settings)); + } + + public static final GrpcClusterControllerStub create(ClientContext clientContext) + throws IOException { + return new GrpcClusterControllerStub( + ClusterControllerStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcClusterControllerStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcClusterControllerStub( + ClusterControllerStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcClusterControllerStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcClusterControllerStub( + ClusterControllerStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new GrpcClusterControllerCallableFactory()); + } + + /** + * Constructs an instance of GrpcClusterControllerStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcClusterControllerStub( + ClusterControllerStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createClusterMethodDescriptor) + .build(); + GrpcCallSettings updateClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateClusterMethodDescriptor) + .build(); + GrpcCallSettings deleteClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteClusterMethodDescriptor) + .build(); + GrpcCallSettings getClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getClusterMethodDescriptor) + .build(); + GrpcCallSettings listClustersTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listClustersMethodDescriptor) + .build(); + GrpcCallSettings diagnoseClusterTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(diagnoseClusterMethodDescriptor) + .build(); + + this.createClusterCallable = + callableFactory.createUnaryCallable( + createClusterTransportSettings, settings.createClusterSettings(), clientContext); + this.createClusterOperationCallable = + callableFactory.createOperationCallable( + createClusterTransportSettings, + settings.createClusterOperationSettings(), + clientContext, + this.operationsStub); + this.updateClusterCallable = + callableFactory.createUnaryCallable( + updateClusterTransportSettings, settings.updateClusterSettings(), clientContext); + this.updateClusterOperationCallable = + callableFactory.createOperationCallable( + updateClusterTransportSettings, + settings.updateClusterOperationSettings(), + clientContext, + this.operationsStub); + this.deleteClusterCallable = + callableFactory.createUnaryCallable( + deleteClusterTransportSettings, settings.deleteClusterSettings(), clientContext); + 
this.deleteClusterOperationCallable = + callableFactory.createOperationCallable( + deleteClusterTransportSettings, + settings.deleteClusterOperationSettings(), + clientContext, + this.operationsStub); + this.getClusterCallable = + callableFactory.createUnaryCallable( + getClusterTransportSettings, settings.getClusterSettings(), clientContext); + this.listClustersCallable = + callableFactory.createUnaryCallable( + listClustersTransportSettings, settings.listClustersSettings(), clientContext); + this.listClustersPagedCallable = + callableFactory.createPagedCallable( + listClustersTransportSettings, settings.listClustersSettings(), clientContext); + this.diagnoseClusterCallable = + callableFactory.createUnaryCallable( + diagnoseClusterTransportSettings, settings.diagnoseClusterSettings(), clientContext); + this.diagnoseClusterOperationCallable = + callableFactory.createOperationCallable( + diagnoseClusterTransportSettings, + settings.diagnoseClusterOperationSettings(), + clientContext, + this.operationsStub); + + backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + createClusterOperationCallable() { + return createClusterOperationCallable; + } + + public UnaryCallable createClusterCallable() { + return createClusterCallable; + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + updateClusterOperationCallable() { + return updateClusterOperationCallable; + } + + public UnaryCallable updateClusterCallable() { + return updateClusterCallable; + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + deleteClusterOperationCallable() { + return deleteClusterOperationCallable; + } + + public UnaryCallable deleteClusterCallable() { + return deleteClusterCallable; + } + + public UnaryCallable getClusterCallable() { + return getClusterCallable; + } + + public UnaryCallable listClustersPagedCallable() { + return listClustersPagedCallable; + } + + public UnaryCallable listClustersCallable() { + return listClustersCallable; + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + diagnoseClusterOperationCallable() { + return diagnoseClusterOperationCallable; + } + + public UnaryCallable diagnoseClusterCallable() { + return diagnoseClusterCallable; + } + + @Override + public final void close() { + shutdown(); + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerCallableFactory.java 
b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerCallableFactory.java new file mode 100644 index 000000000000..4ceb5dc1e382 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerCallableFactory.java @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC callable factory implementation for Google Cloud Dataproc API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator") +@BetaApi("The surface for use by generated code is not stable yet and may change in the future.") +public class GrpcJobControllerCallableFactory implements GrpcStubCallableFactory { + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings pagedCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable( + grpcCallSettings, pagedCallSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings batchingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, batchingCallSettings, clientContext); + } + + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings operationCallSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, operationCallSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerStub.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerStub.java new file mode 100644 index 000000000000..8fd6500f579b --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcJobControllerStub.java @@ -0,0 +1,252 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.JobControllerClient.ListJobsPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.CancelJobRequest; +import com.google.cloud.dataproc.v1beta2.DeleteJobRequest; +import com.google.cloud.dataproc.v1beta2.GetJobRequest; +import com.google.cloud.dataproc.v1beta2.Job; +import com.google.cloud.dataproc.v1beta2.ListJobsRequest; +import com.google.cloud.dataproc.v1beta2.ListJobsResponse; +import com.google.cloud.dataproc.v1beta2.SubmitJobRequest; +import com.google.cloud.dataproc.v1beta2.UpdateJobRequest; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC stub implementation for Google Cloud Dataproc API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public class GrpcJobControllerStub extends JobControllerStub { + + private static final MethodDescriptor submitJobMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.JobController/SubmitJob") + .setRequestMarshaller(ProtoUtils.marshaller(SubmitJobRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Job.getDefaultInstance())) + .build(); + private static final MethodDescriptor getJobMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.JobController/GetJob") + .setRequestMarshaller(ProtoUtils.marshaller(GetJobRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Job.getDefaultInstance())) + .build(); + private static final MethodDescriptor + listJobsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.JobController/ListJobs") + .setRequestMarshaller(ProtoUtils.marshaller(ListJobsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ListJobsResponse.getDefaultInstance())) + .build(); + private static final MethodDescriptor updateJobMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.JobController/UpdateJob") + .setRequestMarshaller(ProtoUtils.marshaller(UpdateJobRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Job.getDefaultInstance())) + .build(); + private static final MethodDescriptor cancelJobMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.JobController/CancelJob") + .setRequestMarshaller(ProtoUtils.marshaller(CancelJobRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Job.getDefaultInstance())) + .build(); + private static final MethodDescriptor deleteJobMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.dataproc.v1beta2.JobController/DeleteJob") + .setRequestMarshaller(ProtoUtils.marshaller(DeleteJobRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .build(); + + private final BackgroundResource backgroundResources; + + private final UnaryCallable submitJobCallable; + private final UnaryCallable getJobCallable; + private final UnaryCallable listJobsCallable; + private final UnaryCallable listJobsPagedCallable; + private final UnaryCallable updateJobCallable; + private final UnaryCallable cancelJobCallable; + private final UnaryCallable deleteJobCallable; + + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcJobControllerStub create(JobControllerStubSettings settings) + throws IOException { + return new GrpcJobControllerStub(settings, ClientContext.create(settings)); + } + + public static final GrpcJobControllerStub create(ClientContext clientContext) throws IOException { + return new GrpcJobControllerStub(JobControllerStubSettings.newBuilder().build(), clientContext); + } + + public static final 
GrpcJobControllerStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcJobControllerStub( + JobControllerStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcJobControllerStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcJobControllerStub(JobControllerStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcJobControllerCallableFactory()); + } + + /** + * Constructs an instance of GrpcJobControllerStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcJobControllerStub( + JobControllerStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + + GrpcCallSettings submitJobTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(submitJobMethodDescriptor) + .build(); + GrpcCallSettings getJobTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getJobMethodDescriptor) + .build(); + GrpcCallSettings listJobsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listJobsMethodDescriptor) + .build(); + GrpcCallSettings updateJobTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateJobMethodDescriptor) + .build(); + GrpcCallSettings cancelJobTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(cancelJobMethodDescriptor) + .build(); + GrpcCallSettings deleteJobTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteJobMethodDescriptor) + .build(); + + this.submitJobCallable = + callableFactory.createUnaryCallable( + submitJobTransportSettings, settings.submitJobSettings(), clientContext); + this.getJobCallable = + callableFactory.createUnaryCallable( + getJobTransportSettings, settings.getJobSettings(), clientContext); + this.listJobsCallable = + callableFactory.createUnaryCallable( + listJobsTransportSettings, settings.listJobsSettings(), clientContext); + this.listJobsPagedCallable = + callableFactory.createPagedCallable( + listJobsTransportSettings, settings.listJobsSettings(), clientContext); + this.updateJobCallable = + callableFactory.createUnaryCallable( + updateJobTransportSettings, settings.updateJobSettings(), clientContext); + this.cancelJobCallable = + callableFactory.createUnaryCallable( + cancelJobTransportSettings, settings.cancelJobSettings(), clientContext); + this.deleteJobCallable = + callableFactory.createUnaryCallable( + deleteJobTransportSettings, settings.deleteJobSettings(), clientContext); + + backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public UnaryCallable submitJobCallable() { + return submitJobCallable; + } + + public UnaryCallable getJobCallable() { + return getJobCallable; + } + + public UnaryCallable listJobsPagedCallable() { + return listJobsPagedCallable; + } + + public UnaryCallable listJobsCallable() { + return listJobsCallable; + } + + public UnaryCallable updateJobCallable() { + return updateJobCallable; + } + + public UnaryCallable cancelJobCallable() { + return cancelJobCallable; + } + + public UnaryCallable deleteJobCallable() { 
+ return deleteJobCallable; + } + + @Override + public final void close() { + shutdown(); + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceCallableFactory.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceCallableFactory.java new file mode 100644 index 000000000000..3e0d8fa85341 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceCallableFactory.java @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC callable factory implementation for Google Cloud Dataproc API. + * + *
<p>
This class is for advanced usage. + */ +@Generated("by gapic-generator") +@BetaApi("The surface for use by generated code is not stable yet and may change in the future.") +public class GrpcWorkflowTemplateServiceCallableFactory implements GrpcStubCallableFactory { + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings pagedCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable( + grpcCallSettings, pagedCallSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings batchingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, batchingCallSettings, clientContext); + } + + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings operationCallSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, operationCallSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceStub.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceStub.java new file mode 100644 index 000000000000..ca8eae106bd4 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/GrpcWorkflowTemplateServiceStub.java @@ -0,0 +1,329 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceClient.ListWorkflowTemplatesPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest; +import com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse; +import com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.WorkflowMetadata; +import com.google.cloud.dataproc.v1beta2.WorkflowTemplate; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC stub implementation for Google Cloud Dataproc API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public class GrpcWorkflowTemplateServiceStub extends WorkflowTemplateServiceStub { + + private static final MethodDescriptor + createWorkflowTemplateMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WorkflowTemplate.getDefaultInstance())) + .build(); + private static final MethodDescriptor + getWorkflowTemplateMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate") + .setRequestMarshaller( + ProtoUtils.marshaller(GetWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WorkflowTemplate.getDefaultInstance())) + .build(); + private static final MethodDescriptor + instantiateWorkflowTemplateMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate") + .setRequestMarshaller( + ProtoUtils.marshaller(InstantiateWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + private static final MethodDescriptor + updateWorkflowTemplateMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WorkflowTemplate.getDefaultInstance())) + .build(); + private static final MethodDescriptor + listWorkflowTemplatesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates") + .setRequestMarshaller( + ProtoUtils.marshaller(ListWorkflowTemplatesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListWorkflowTemplatesResponse.getDefaultInstance())) + .build(); + private static final MethodDescriptor + deleteWorkflowTemplateMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteWorkflowTemplateRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .build(); + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + + private final UnaryCallable + createWorkflowTemplateCallable; + private final UnaryCallable + getWorkflowTemplateCallable; + private final UnaryCallable + instantiateWorkflowTemplateCallable; + private final OperationCallable + instantiateWorkflowTemplateOperationCallable; + private final UnaryCallable + updateWorkflowTemplateCallable; + private final 
UnaryCallable + listWorkflowTemplatesCallable; + private final UnaryCallable + listWorkflowTemplatesPagedCallable; + private final UnaryCallable deleteWorkflowTemplateCallable; + + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcWorkflowTemplateServiceStub create( + WorkflowTemplateServiceStubSettings settings) throws IOException { + return new GrpcWorkflowTemplateServiceStub(settings, ClientContext.create(settings)); + } + + public static final GrpcWorkflowTemplateServiceStub create(ClientContext clientContext) + throws IOException { + return new GrpcWorkflowTemplateServiceStub( + WorkflowTemplateServiceStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcWorkflowTemplateServiceStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcWorkflowTemplateServiceStub( + WorkflowTemplateServiceStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcWorkflowTemplateServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcWorkflowTemplateServiceStub( + WorkflowTemplateServiceStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcWorkflowTemplateServiceCallableFactory()); + } + + /** + * Constructs an instance of GrpcWorkflowTemplateServiceStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcWorkflowTemplateServiceStub( + WorkflowTemplateServiceStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings + createWorkflowTemplateTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createWorkflowTemplateMethodDescriptor) + .build(); + GrpcCallSettings + getWorkflowTemplateTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getWorkflowTemplateMethodDescriptor) + .build(); + GrpcCallSettings + instantiateWorkflowTemplateTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(instantiateWorkflowTemplateMethodDescriptor) + .build(); + GrpcCallSettings + updateWorkflowTemplateTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateWorkflowTemplateMethodDescriptor) + .build(); + GrpcCallSettings + listWorkflowTemplatesTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(listWorkflowTemplatesMethodDescriptor) + .build(); + GrpcCallSettings deleteWorkflowTemplateTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteWorkflowTemplateMethodDescriptor) + .build(); + + this.createWorkflowTemplateCallable = + callableFactory.createUnaryCallable( + createWorkflowTemplateTransportSettings, + settings.createWorkflowTemplateSettings(), + clientContext); + this.getWorkflowTemplateCallable = + callableFactory.createUnaryCallable( + getWorkflowTemplateTransportSettings, + settings.getWorkflowTemplateSettings(), + clientContext); + this.instantiateWorkflowTemplateCallable = + callableFactory.createUnaryCallable( + instantiateWorkflowTemplateTransportSettings, + 
settings.instantiateWorkflowTemplateSettings(), + clientContext); + this.instantiateWorkflowTemplateOperationCallable = + callableFactory.createOperationCallable( + instantiateWorkflowTemplateTransportSettings, + settings.instantiateWorkflowTemplateOperationSettings(), + clientContext, + this.operationsStub); + this.updateWorkflowTemplateCallable = + callableFactory.createUnaryCallable( + updateWorkflowTemplateTransportSettings, + settings.updateWorkflowTemplateSettings(), + clientContext); + this.listWorkflowTemplatesCallable = + callableFactory.createUnaryCallable( + listWorkflowTemplatesTransportSettings, + settings.listWorkflowTemplatesSettings(), + clientContext); + this.listWorkflowTemplatesPagedCallable = + callableFactory.createPagedCallable( + listWorkflowTemplatesTransportSettings, + settings.listWorkflowTemplatesSettings(), + clientContext); + this.deleteWorkflowTemplateCallable = + callableFactory.createUnaryCallable( + deleteWorkflowTemplateTransportSettings, + settings.deleteWorkflowTemplateSettings(), + clientContext); + + backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + public UnaryCallable + createWorkflowTemplateCallable() { + return createWorkflowTemplateCallable; + } + + public UnaryCallable getWorkflowTemplateCallable() { + return getWorkflowTemplateCallable; + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + instantiateWorkflowTemplateOperationCallable() { + return instantiateWorkflowTemplateOperationCallable; + } + + public UnaryCallable + instantiateWorkflowTemplateCallable() { + return instantiateWorkflowTemplateCallable; + } + + public UnaryCallable + updateWorkflowTemplateCallable() { + return updateWorkflowTemplateCallable; + } + + public UnaryCallable + listWorkflowTemplatesPagedCallable() { + return listWorkflowTemplatesPagedCallable; + } + + public UnaryCallable + listWorkflowTemplatesCallable() { + return listWorkflowTemplatesCallable; + } + + public UnaryCallable deleteWorkflowTemplateCallable() { + return deleteWorkflowTemplateCallable; + } + + @Override + public final void close() { + shutdown(); + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStub.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStub.java new file mode 100644 index 000000000000..98c36f652c0d --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStub.java @@ -0,0 +1,74 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.JobControllerClient.ListJobsPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.CancelJobRequest; +import com.google.cloud.dataproc.v1beta2.DeleteJobRequest; +import com.google.cloud.dataproc.v1beta2.GetJobRequest; +import com.google.cloud.dataproc.v1beta2.Job; +import com.google.cloud.dataproc.v1beta2.ListJobsRequest; +import com.google.cloud.dataproc.v1beta2.ListJobsResponse; +import com.google.cloud.dataproc.v1beta2.SubmitJobRequest; +import com.google.cloud.dataproc.v1beta2.UpdateJobRequest; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Base stub class for Google Cloud Dataproc API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public abstract class JobControllerStub implements BackgroundResource { + + public UnaryCallable submitJobCallable() { + throw new UnsupportedOperationException("Not implemented: submitJobCallable()"); + } + + public UnaryCallable getJobCallable() { + throw new UnsupportedOperationException("Not implemented: getJobCallable()"); + } + + public UnaryCallable listJobsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listJobsPagedCallable()"); + } + + public UnaryCallable listJobsCallable() { + throw new UnsupportedOperationException("Not implemented: listJobsCallable()"); + } + + public UnaryCallable updateJobCallable() { + throw new UnsupportedOperationException("Not implemented: updateJobCallable()"); + } + + public UnaryCallable cancelJobCallable() { + throw new UnsupportedOperationException("Not implemented: cancelJobCallable()"); + } + + public UnaryCallable deleteJobCallable() { + throw new UnsupportedOperationException("Not implemented: deleteJobCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStubSettings.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStubSettings.java new file mode 100644 index 000000000000..9ec7538a4627 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/JobControllerStubSettings.java @@ -0,0 +1,452 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
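For orientation, here is a minimal sketch of driving the job stub surface end to end: build the settings (the class that follows), create the transport-specific stub via createStub(), and invoke a unary callable directly. The project, region, and job IDs are placeholders, and a real call needs Application Default Credentials:

import com.google.cloud.dataproc.v1beta2.GetJobRequest;
import com.google.cloud.dataproc.v1beta2.Job;

public class JobControllerStubExample {
  public static void main(String[] args) throws Exception {
    JobControllerStubSettings settings = JobControllerStubSettings.newBuilder().build();
    JobControllerStub stub = settings.createStub();
    GetJobRequest request =
        GetJobRequest.newBuilder()
            .setProjectId("my-project") // placeholder
            .setRegion("us-central1") // placeholder
            .setJobId("my-job-id") // placeholder
            .build();
    // call() is the blocking form of a unary callable.
    Job job = stub.getJobCallable().call(request);
    System.out.println(job.getStatus().getState());
    stub.close();
  }
}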
+ */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.JobControllerClient.ListJobsPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.CancelJobRequest; +import com.google.cloud.dataproc.v1beta2.DeleteJobRequest; +import com.google.cloud.dataproc.v1beta2.GetJobRequest; +import com.google.cloud.dataproc.v1beta2.Job; +import com.google.cloud.dataproc.v1beta2.ListJobsRequest; +import com.google.cloud.dataproc.v1beta2.ListJobsResponse; +import com.google.cloud.dataproc.v1beta2.SubmitJobRequest; +import com.google.cloud.dataproc.v1beta2.UpdateJobRequest; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link JobControllerStub}. + * + *
<p>
The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (dataproc.googleapis.com) and default port (443) are used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of submitJob to 30 seconds:
+ *
+ * <pre>
+ * <code>
+ * JobControllerStubSettings.Builder jobControllerSettingsBuilder =
+ *     JobControllerStubSettings.newBuilder();
+ * jobControllerSettingsBuilder.submitJobSettings().getRetrySettings().toBuilder()
+ *     .setTotalTimeout(Duration.ofSeconds(30));
+ * JobControllerStubSettings jobControllerSettings = jobControllerSettingsBuilder.build();
+ * </code>
+ * </pre>
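One caveat on the snippet above: getRetrySettings().toBuilder() hands back a detached RetrySettings.Builder, so the new total timeout must be set back onto the call settings or it is silently discarded. A sketch of the pattern that actually takes effect (the example class and method names are illustrative):

import java.io.IOException;
import org.threeten.bp.Duration;

public class SubmitJobTimeoutExample {
  public static JobControllerStubSettings withLongerTimeout() throws IOException {
    JobControllerStubSettings.Builder builder = JobControllerStubSettings.newBuilder();
    // Rebuild the RetrySettings, then apply them back to submitJob's call settings.
    builder
        .submitJobSettings()
        .setRetrySettings(
            builder
                .submitJobSettings()
                .getRetrySettings()
                .toBuilder()
                .setTotalTimeout(Duration.ofSeconds(30))
                .build());
    return builder.build();
  }
}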
+ */ +@Generated("by gapic-generator") +@BetaApi +public class JobControllerStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder().add("https://www.googleapis.com/auth/cloud-platform").build(); + + private final UnaryCallSettings submitJobSettings; + private final UnaryCallSettings getJobSettings; + private final PagedCallSettings + listJobsSettings; + private final UnaryCallSettings updateJobSettings; + private final UnaryCallSettings cancelJobSettings; + private final UnaryCallSettings deleteJobSettings; + + /** Returns the object with the settings used for calls to submitJob. */ + public UnaryCallSettings submitJobSettings() { + return submitJobSettings; + } + + /** Returns the object with the settings used for calls to getJob. */ + public UnaryCallSettings getJobSettings() { + return getJobSettings; + } + + /** Returns the object with the settings used for calls to listJobs. */ + public PagedCallSettings + listJobsSettings() { + return listJobsSettings; + } + + /** Returns the object with the settings used for calls to updateJob. */ + public UnaryCallSettings updateJobSettings() { + return updateJobSettings; + } + + /** Returns the object with the settings used for calls to cancelJob. */ + public UnaryCallSettings cancelJobSettings() { + return cancelJobSettings; + } + + /** Returns the object with the settings used for calls to deleteJob. */ + public UnaryCallSettings deleteJobSettings() { + return deleteJobSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public JobControllerStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcJobControllerStub.create(this); + } else { + throw new UnsupportedOperationException( + "Transport not supported: " + getTransportChannelProvider().getTransportName()); + } + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "dataproc.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder().setScopesToApply(DEFAULT_SERVICE_SCOPES); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(JobControllerStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected JobControllerStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + submitJobSettings = settingsBuilder.submitJobSettings().build(); + getJobSettings = settingsBuilder.getJobSettings().build(); + listJobsSettings = settingsBuilder.listJobsSettings().build(); + updateJobSettings = settingsBuilder.updateJobSettings().build(); + cancelJobSettings = settingsBuilder.cancelJobSettings().build(); + deleteJobSettings = settingsBuilder.deleteJobSettings().build(); + } + + private static final PagedListDescriptor + LIST_JOBS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListJobsRequest injectToken(ListJobsRequest payload, String token) { + return ListJobsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListJobsRequest injectPageSize(ListJobsRequest payload, int pageSize) { + return ListJobsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListJobsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListJobsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListJobsResponse payload) { + return payload.getJobsList(); + } + }; + + private static final PagedListResponseFactory< + ListJobsRequest, ListJobsResponse, ListJobsPagedResponse> + LIST_JOBS_PAGE_STR_FACT = + new PagedListResponseFactory() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListJobsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_JOBS_PAGE_STR_DESC, request, context); + return ListJobsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Builder for JobControllerStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder submitJobSettings; + private final UnaryCallSettings.Builder getJobSettings; + private final PagedCallSettings.Builder< + ListJobsRequest, ListJobsResponse, ListJobsPagedResponse> + listJobsSettings; + private final UnaryCallSettings.Builder updateJobSettings; + private final UnaryCallSettings.Builder cancelJobSettings; + private final UnaryCallSettings.Builder deleteJobSettings; + + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "idempotent", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put("non_idempotent", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(30000L)) + .setTotalTimeout(Duration.ofMillis(900000L)) + .build(); + definitions.put("default", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + submitJobSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + getJobSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + listJobsSettings = PagedCallSettings.newBuilder(LIST_JOBS_PAGE_STR_FACT); + + updateJobSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + cancelJobSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + deleteJobSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + submitJobSettings, + getJobSettings, + listJobsSettings, + updateJobSettings, + cancelJobSettings, + deleteJobSettings); + + initDefaults(this); + } + + private static Builder createDefault() { + Builder builder = new Builder((ClientContext) null); + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + + builder + .submitJobSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .getJobSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .listJobsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .updateJobSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + 
.cancelJobSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .deleteJobSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + return builder; + } + + protected Builder(JobControllerStubSettings settings) { + super(settings); + + submitJobSettings = settings.submitJobSettings.toBuilder(); + getJobSettings = settings.getJobSettings.toBuilder(); + listJobsSettings = settings.listJobsSettings.toBuilder(); + updateJobSettings = settings.updateJobSettings.toBuilder(); + cancelJobSettings = settings.cancelJobSettings.toBuilder(); + deleteJobSettings = settings.deleteJobSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + submitJobSettings, + getJobSettings, + listJobsSettings, + updateJobSettings, + cancelJobSettings, + deleteJobSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to submitJob. */ + public UnaryCallSettings.Builder submitJobSettings() { + return submitJobSettings; + } + + /** Returns the builder for the settings used for calls to getJob. */ + public UnaryCallSettings.Builder getJobSettings() { + return getJobSettings; + } + + /** Returns the builder for the settings used for calls to listJobs. */ + public PagedCallSettings.Builder + listJobsSettings() { + return listJobsSettings; + } + + /** Returns the builder for the settings used for calls to updateJob. */ + public UnaryCallSettings.Builder updateJobSettings() { + return updateJobSettings; + } + + /** Returns the builder for the settings used for calls to cancelJob. */ + public UnaryCallSettings.Builder cancelJobSettings() { + return cancelJobSettings; + } + + /** Returns the builder for the settings used for calls to deleteJob. */ + public UnaryCallSettings.Builder deleteJobSettings() { + return deleteJobSettings; + } + + @Override + public JobControllerStubSettings build() throws IOException { + return new JobControllerStubSettings(this); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStub.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStub.java new file mode 100644 index 000000000000..f54b4da975a5 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStub.java @@ -0,0 +1,97 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
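The applyToAllUnaryMethods hook declared in the Builder above is the bulk-configuration path: it runs one updater over every builder registered in unaryMethodSettingsBuilders. A hypothetical sketch that turns retries off for all six job methods at once (the zero-argument varargs call clears the retryable codes):

import com.google.api.core.ApiFunction;
import com.google.api.gax.rpc.UnaryCallSettings;

public class DisableRetriesExample {
  public static void disableRetries(JobControllerStubSettings.Builder builder) throws Exception {
    builder.applyToAllUnaryMethods(
        new ApiFunction<UnaryCallSettings.Builder<?, ?>, Void>() {
          @Override
          public Void apply(UnaryCallSettings.Builder<?, ?> settings) {
            settings.setRetryableCodes(); // no retryable codes: one attempt per call
            return null;
          }
        });
  }
}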
+ */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceClient.ListWorkflowTemplatesPagedResponse; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest; +import com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse; +import com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.WorkflowMetadata; +import com.google.cloud.dataproc.v1beta2.WorkflowTemplate; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import com.google.protobuf.Empty; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Base stub class for Google Cloud Dataproc API. + * + *
<p>
This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public abstract class WorkflowTemplateServiceStub implements BackgroundResource { + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationsStub getOperationsStub() { + throw new UnsupportedOperationException("Not implemented: getOperationsStub()"); + } + + public UnaryCallable + createWorkflowTemplateCallable() { + throw new UnsupportedOperationException("Not implemented: createWorkflowTemplateCallable()"); + } + + public UnaryCallable getWorkflowTemplateCallable() { + throw new UnsupportedOperationException("Not implemented: getWorkflowTemplateCallable()"); + } + + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallable + instantiateWorkflowTemplateOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: instantiateWorkflowTemplateOperationCallable()"); + } + + public UnaryCallable + instantiateWorkflowTemplateCallable() { + throw new UnsupportedOperationException( + "Not implemented: instantiateWorkflowTemplateCallable()"); + } + + public UnaryCallable + updateWorkflowTemplateCallable() { + throw new UnsupportedOperationException("Not implemented: updateWorkflowTemplateCallable()"); + } + + public UnaryCallable + listWorkflowTemplatesPagedCallable() { + throw new UnsupportedOperationException( + "Not implemented: listWorkflowTemplatesPagedCallable()"); + } + + public UnaryCallable + listWorkflowTemplatesCallable() { + throw new UnsupportedOperationException("Not implemented: listWorkflowTemplatesCallable()"); + } + + public UnaryCallable deleteWorkflowTemplateCallable() { + throw new UnsupportedOperationException("Not implemented: deleteWorkflowTemplateCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStubSettings.java b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStubSettings.java new file mode 100644 index 000000000000..d0915d036d6f --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1beta2/stub/WorkflowTemplateServiceStubSettings.java @@ -0,0 +1,551 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
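Instantiating a template is the one long-running method on this surface: the operation's response type is Empty and its metadata type is WorkflowMetadata. A hedged sketch of polling it to completion, assuming the same stub package and Application Default Credentials (the template name is a placeholder):

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest;
import com.google.cloud.dataproc.v1beta2.WorkflowMetadata;
import com.google.protobuf.Empty;

public class InstantiateTemplateExample {
  public static void main(String[] args) throws Exception {
    WorkflowTemplateServiceStubSettings settings =
        WorkflowTemplateServiceStubSettings.newBuilder().build();
    WorkflowTemplateServiceStub stub = settings.createStub();
    InstantiateWorkflowTemplateRequest request =
        InstantiateWorkflowTemplateRequest.newBuilder()
            .setName("projects/my-project/regions/us-central1/workflowTemplates/my-template")
            .build();
    // futureCall starts the operation; get() polls until the workflow finishes.
    OperationFuture<Empty, WorkflowMetadata> future =
        stub.instantiateWorkflowTemplateOperationCallable().futureCall(request);
    future.get();
    stub.close();
  }
}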
+ */ +package com.google.cloud.dataproc.v1beta2.stub; + +import static com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceClient.ListWorkflowTemplatesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest; +import com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse; +import com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest; +import com.google.cloud.dataproc.v1beta2.WorkflowMetadata; +import com.google.cloud.dataproc.v1beta2.WorkflowTemplate; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link WorkflowTemplateServiceStub}. + * + *
<p>
The default instance has everything set to sensible defaults:
+ *
+ * <ul>
+ *   <li>The default service address (dataproc.googleapis.com) and default port (443) are used.
+ *   <li>Credentials are acquired automatically through Application Default Credentials.
+ *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
+ * </ul>
+ *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of createWorkflowTemplate to 30 seconds:
+ *
+ * <pre>
+ * <code>
+ * WorkflowTemplateServiceStubSettings.Builder workflowTemplateServiceSettingsBuilder =
+ *     WorkflowTemplateServiceStubSettings.newBuilder();
+ * workflowTemplateServiceSettingsBuilder.createWorkflowTemplateSettings().getRetrySettings().toBuilder()
+ *     .setTotalTimeout(Duration.ofSeconds(30));
+ * WorkflowTemplateServiceStubSettings workflowTemplateServiceSettings = workflowTemplateServiceSettingsBuilder.build();
+ * </code>
+ * </pre>
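The paged callable for listWorkflowTemplates layers the page descriptor defined further down (token injection, page-size extraction, resource iteration) over the raw list RPC, so callers never touch next-page tokens. A sketch, with a placeholder parent and the usual credential assumptions:

import com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest;
import com.google.cloud.dataproc.v1beta2.WorkflowTemplate;

public class ListTemplatesExample {
  public static void main(String[] args) throws Exception {
    WorkflowTemplateServiceStubSettings settings =
        WorkflowTemplateServiceStubSettings.newBuilder().build();
    WorkflowTemplateServiceStub stub = settings.createStub();
    ListWorkflowTemplatesRequest request =
        ListWorkflowTemplatesRequest.newBuilder()
            .setParent("projects/my-project/regions/us-central1") // placeholder
            .build();
    // iterateAll() fetches page after page via the next-page token.
    for (WorkflowTemplate template :
        stub.listWorkflowTemplatesPagedCallable().call(request).iterateAll()) {
      System.out.println(template.getId());
    }
    stub.close();
  }
}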
+ */ +@Generated("by gapic-generator") +@BetaApi +public class WorkflowTemplateServiceStubSettings + extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder().add("https://www.googleapis.com/auth/cloud-platform").build(); + + private final UnaryCallSettings + createWorkflowTemplateSettings; + private final UnaryCallSettings + getWorkflowTemplateSettings; + private final UnaryCallSettings + instantiateWorkflowTemplateSettings; + private final OperationCallSettings + instantiateWorkflowTemplateOperationSettings; + private final UnaryCallSettings + updateWorkflowTemplateSettings; + private final PagedCallSettings< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse> + listWorkflowTemplatesSettings; + private final UnaryCallSettings + deleteWorkflowTemplateSettings; + + /** Returns the object with the settings used for calls to createWorkflowTemplate. */ + public UnaryCallSettings + createWorkflowTemplateSettings() { + return createWorkflowTemplateSettings; + } + + /** Returns the object with the settings used for calls to getWorkflowTemplate. */ + public UnaryCallSettings + getWorkflowTemplateSettings() { + return getWorkflowTemplateSettings; + } + + /** Returns the object with the settings used for calls to instantiateWorkflowTemplate. */ + public UnaryCallSettings + instantiateWorkflowTemplateSettings() { + return instantiateWorkflowTemplateSettings; + } + + /** Returns the object with the settings used for calls to instantiateWorkflowTemplate. */ + @BetaApi("The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings + instantiateWorkflowTemplateOperationSettings() { + return instantiateWorkflowTemplateOperationSettings; + } + + /** Returns the object with the settings used for calls to updateWorkflowTemplate. */ + public UnaryCallSettings + updateWorkflowTemplateSettings() { + return updateWorkflowTemplateSettings; + } + + /** Returns the object with the settings used for calls to listWorkflowTemplates. */ + public PagedCallSettings< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse> + listWorkflowTemplatesSettings() { + return listWorkflowTemplatesSettings; + } + + /** Returns the object with the settings used for calls to deleteWorkflowTemplate. */ + public UnaryCallSettings deleteWorkflowTemplateSettings() { + return deleteWorkflowTemplateSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public WorkflowTemplateServiceStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcWorkflowTemplateServiceStub.create(this); + } else { + throw new UnsupportedOperationException( + "Transport not supported: " + getTransportChannelProvider().getTransportName()); + } + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "dataproc.googleapis.com:443"; + } + + /** Returns the default service scopes. 
*/ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder().setScopesToApply(DEFAULT_SERVICE_SCOPES); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(WorkflowTemplateServiceStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected WorkflowTemplateServiceStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createWorkflowTemplateSettings = settingsBuilder.createWorkflowTemplateSettings().build(); + getWorkflowTemplateSettings = settingsBuilder.getWorkflowTemplateSettings().build(); + instantiateWorkflowTemplateSettings = + settingsBuilder.instantiateWorkflowTemplateSettings().build(); + instantiateWorkflowTemplateOperationSettings = + settingsBuilder.instantiateWorkflowTemplateOperationSettings().build(); + updateWorkflowTemplateSettings = settingsBuilder.updateWorkflowTemplateSettings().build(); + listWorkflowTemplatesSettings = settingsBuilder.listWorkflowTemplatesSettings().build(); + deleteWorkflowTemplateSettings = settingsBuilder.deleteWorkflowTemplateSettings().build(); + } + + private static final PagedListDescriptor< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, WorkflowTemplate> + LIST_WORKFLOW_TEMPLATES_PAGE_STR_DESC = + new PagedListDescriptor< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, WorkflowTemplate>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListWorkflowTemplatesRequest injectToken( + ListWorkflowTemplatesRequest payload, String token) { + return ListWorkflowTemplatesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListWorkflowTemplatesRequest injectPageSize( + ListWorkflowTemplatesRequest payload, int pageSize) { + return ListWorkflowTemplatesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListWorkflowTemplatesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListWorkflowTemplatesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources( + ListWorkflowTemplatesResponse payload) { + return 
payload.getTemplatesList(); + } + }; + + private static final PagedListResponseFactory< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse> + LIST_WORKFLOW_TEMPLATES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListWorkflowTemplatesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, WorkflowTemplate> + pageContext = + PageContext.create( + callable, LIST_WORKFLOW_TEMPLATES_PAGE_STR_DESC, request, context); + return ListWorkflowTemplatesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Builder for WorkflowTemplateServiceStubSettings. */ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder + createWorkflowTemplateSettings; + private final UnaryCallSettings.Builder + getWorkflowTemplateSettings; + private final UnaryCallSettings.Builder + instantiateWorkflowTemplateSettings; + private final OperationCallSettings.Builder< + InstantiateWorkflowTemplateRequest, Empty, WorkflowMetadata> + instantiateWorkflowTemplateOperationSettings; + private final UnaryCallSettings.Builder + updateWorkflowTemplateSettings; + private final PagedCallSettings.Builder< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse> + listWorkflowTemplatesSettings; + private final UnaryCallSettings.Builder + deleteWorkflowTemplateSettings; + + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "idempotent", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put("non_idempotent", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(20000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(20000L)) + .setTotalTimeout(Duration.ofMillis(600000L)) + .build(); + definitions.put("default", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createWorkflowTemplateSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + getWorkflowTemplateSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + instantiateWorkflowTemplateSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + instantiateWorkflowTemplateOperationSettings = OperationCallSettings.newBuilder(); + + updateWorkflowTemplateSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + listWorkflowTemplatesSettings = + PagedCallSettings.newBuilder(LIST_WORKFLOW_TEMPLATES_PAGE_STR_FACT); + + deleteWorkflowTemplateSettings = 
UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createWorkflowTemplateSettings, + getWorkflowTemplateSettings, + instantiateWorkflowTemplateSettings, + updateWorkflowTemplateSettings, + listWorkflowTemplatesSettings, + deleteWorkflowTemplateSettings); + + initDefaults(this); + } + + private static Builder createDefault() { + Builder builder = new Builder((ClientContext) null); + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + + builder + .createWorkflowTemplateSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .getWorkflowTemplateSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .instantiateWorkflowTemplateSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .updateWorkflowTemplateSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .listWorkflowTemplatesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .deleteWorkflowTemplateSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + builder + .instantiateWorkflowTemplateOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + . 
+ newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Empty.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(WorkflowMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMillis(10000L)) + .setInitialRpcTimeout(Duration.ZERO) // ignored + .setRpcTimeoutMultiplier(1.0) // ignored + .setMaxRpcTimeout(Duration.ZERO) // ignored + .setTotalTimeout(Duration.ofMillis(43200000L)) + .build())); + + return builder; + } + + protected Builder(WorkflowTemplateServiceStubSettings settings) { + super(settings); + + createWorkflowTemplateSettings = settings.createWorkflowTemplateSettings.toBuilder(); + getWorkflowTemplateSettings = settings.getWorkflowTemplateSettings.toBuilder(); + instantiateWorkflowTemplateSettings = + settings.instantiateWorkflowTemplateSettings.toBuilder(); + instantiateWorkflowTemplateOperationSettings = + settings.instantiateWorkflowTemplateOperationSettings.toBuilder(); + updateWorkflowTemplateSettings = settings.updateWorkflowTemplateSettings.toBuilder(); + listWorkflowTemplatesSettings = settings.listWorkflowTemplatesSettings.toBuilder(); + deleteWorkflowTemplateSettings = settings.deleteWorkflowTemplateSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createWorkflowTemplateSettings, + getWorkflowTemplateSettings, + instantiateWorkflowTemplateSettings, + updateWorkflowTemplateSettings, + listWorkflowTemplatesSettings, + deleteWorkflowTemplateSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
<p>
Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createWorkflowTemplate. */ + public UnaryCallSettings.Builder + createWorkflowTemplateSettings() { + return createWorkflowTemplateSettings; + } + + /** Returns the builder for the settings used for calls to getWorkflowTemplate. */ + public UnaryCallSettings.Builder + getWorkflowTemplateSettings() { + return getWorkflowTemplateSettings; + } + + /** Returns the builder for the settings used for calls to instantiateWorkflowTemplate. */ + public UnaryCallSettings.Builder + instantiateWorkflowTemplateSettings() { + return instantiateWorkflowTemplateSettings; + } + + /** Returns the builder for the settings used for calls to instantiateWorkflowTemplate. */ + @BetaApi( + "The surface for use by generated code is not stable yet and may change in the future.") + public OperationCallSettings.Builder< + InstantiateWorkflowTemplateRequest, Empty, WorkflowMetadata> + instantiateWorkflowTemplateOperationSettings() { + return instantiateWorkflowTemplateOperationSettings; + } + + /** Returns the builder for the settings used for calls to updateWorkflowTemplate. */ + public UnaryCallSettings.Builder + updateWorkflowTemplateSettings() { + return updateWorkflowTemplateSettings; + } + + /** Returns the builder for the settings used for calls to listWorkflowTemplates. */ + public PagedCallSettings.Builder< + ListWorkflowTemplatesRequest, ListWorkflowTemplatesResponse, + ListWorkflowTemplatesPagedResponse> + listWorkflowTemplatesSettings() { + return listWorkflowTemplatesSettings; + } + + /** Returns the builder for the settings used for calls to deleteWorkflowTemplate. */ + public UnaryCallSettings.Builder + deleteWorkflowTemplateSettings() { + return deleteWorkflowTemplateSettings; + } + + @Override + public WorkflowTemplateServiceStubSettings build() throws IOException { + return new WorkflowTemplateServiceStubSettings(this); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClientTest.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClientTest.java new file mode 100644 index 000000000000..cfa9e8c48293 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerClientTest.java @@ -0,0 +1,356 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.dataproc.v1beta2; + +import static com.google.cloud.dataproc.v1beta2.ClusterControllerClient.ListClustersPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.common.collect.Lists; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@javax.annotation.Generated("by GAPIC") +public class ClusterControllerClientTest { + private static MockClusterController mockClusterController; + private static MockJobController mockJobController; + private static MockWorkflowTemplateService mockWorkflowTemplateService; + private static MockServiceHelper serviceHelper; + private ClusterControllerClient client; + private LocalChannelProvider channelProvider; + + @BeforeClass + public static void startStaticServer() { + mockClusterController = new MockClusterController(); + mockJobController = new MockJobController(); + mockWorkflowTemplateService = new MockWorkflowTemplateService(); + serviceHelper = + new MockServiceHelper( + "in-process-1", + Arrays.asList( + mockClusterController, mockJobController, mockWorkflowTemplateService)); + serviceHelper.start(); + } + + @AfterClass + public static void stopServer() { + serviceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + ClusterControllerSettings settings = + ClusterControllerSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = ClusterControllerClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + @SuppressWarnings("all") + public void createClusterTest() throws Exception { + String projectId2 = "projectId2939242356"; + String clusterName = "clusterName-1018081872"; + String clusterUuid = "clusterUuid-1017854240"; + Cluster expectedResponse = + Cluster.newBuilder() + .setProjectId(projectId2) + .setClusterName(clusterName) + .setClusterUuid(clusterUuid) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockClusterController.addResponse(resultOperation); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + Cluster cluster = Cluster.newBuilder().build(); + + Cluster actualResponse = client.createClusterAsync(projectId, region, cluster).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockClusterController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateClusterRequest 
actualRequest = (CreateClusterRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(cluster, actualRequest.getCluster()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void createClusterExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockClusterController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + Cluster cluster = Cluster.newBuilder().build(); + + client.createClusterAsync(projectId, region, cluster).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + @SuppressWarnings("all") + public void deleteClusterTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("deleteClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockClusterController.addResponse(resultOperation); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String clusterName = "clusterName-1018081872"; + + Empty actualResponse = client.deleteClusterAsync(projectId, region, clusterName).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockClusterController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteClusterRequest actualRequest = (DeleteClusterRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(clusterName, actualRequest.getClusterName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void deleteClusterExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockClusterController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String clusterName = "clusterName-1018081872"; + + client.deleteClusterAsync(projectId, region, clusterName).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + @SuppressWarnings("all") + public void getClusterTest() { + String projectId2 = "projectId2939242356"; + String clusterName2 = "clusterName2875867491"; + String clusterUuid = "clusterUuid-1017854240"; + Cluster expectedResponse = + Cluster.newBuilder() + .setProjectId(projectId2) + .setClusterName(clusterName2) + .setClusterUuid(clusterUuid) 
+ .build(); + mockClusterController.addResponse(expectedResponse); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String clusterName = "clusterName-1018081872"; + + Cluster actualResponse = client.getCluster(projectId, region, clusterName); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockClusterController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetClusterRequest actualRequest = (GetClusterRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(clusterName, actualRequest.getClusterName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void getClusterExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockClusterController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String clusterName = "clusterName-1018081872"; + + client.getCluster(projectId, region, clusterName); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void listClustersTest() { + String nextPageToken = ""; + Cluster clustersElement = Cluster.newBuilder().build(); + List clusters = Arrays.asList(clustersElement); + ListClustersResponse expectedResponse = + ListClustersResponse.newBuilder() + .setNextPageToken(nextPageToken) + .addAllClusters(clusters) + .build(); + mockClusterController.addResponse(expectedResponse); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + + ListClustersPagedResponse pagedListResponse = client.listClusters(projectId, region); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getClustersList().get(0), resources.get(0)); + + List actualRequests = mockClusterController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListClustersRequest actualRequest = (ListClustersRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void listClustersExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockClusterController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + + client.listClusters(projectId, region); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void diagnoseClusterTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("diagnoseClusterTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + 
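+ // The mock is primed with an already-completed Operation (setDone(true)) whose response
+ // field Any-packs the expected Empty, so the diagnoseClusterAsync(...).get() call below
+ // resolves without any polling round-trips.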
mockClusterController.addResponse(resultOperation); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String clusterName = "clusterName-1018081872"; + + Empty actualResponse = client.diagnoseClusterAsync(projectId, region, clusterName).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockClusterController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DiagnoseClusterRequest actualRequest = (DiagnoseClusterRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(clusterName, actualRequest.getClusterName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void diagnoseClusterExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockClusterController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String clusterName = "clusterName-1018081872"; + + client.diagnoseClusterAsync(projectId, region, clusterName).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSmokeTest.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSmokeTest.java new file mode 100644 index 000000000000..203aded85de6 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/ClusterControllerSmokeTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.dataproc.v1beta2; + +import static com.google.cloud.dataproc.v1beta2.ClusterControllerClient.ListClustersPagedResponse; + +import com.google.common.base.Preconditions; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.junit.Test; + +@javax.annotation.Generated("by GAPIC") +public class ClusterControllerSmokeTest { + private static final String PROJECT_ENV_NAME = "GOOGLE_CLOUD_PROJECT"; + private static final String LEGACY_PROJECT_ENV_NAME = "GCLOUD_PROJECT"; + + @Test + public void run() { + main(null); + } + + public static void main(String args[]) { + Logger.getLogger("").setLevel(Level.WARNING); + try { + executeNoCatch(getProjectId()); + System.out.println("OK"); + } catch (Exception e) { + System.err.println("Failed with exception:"); + e.printStackTrace(System.err); + System.exit(1); + } + } + + public static void executeNoCatch(String projectId) throws Exception { + try (ClusterControllerClient client = ClusterControllerClient.create()) { + String projectId2 = projectId; + String region = "global"; + + ListClustersPagedResponse pagedResponse = client.listClusters(projectId2, region); + } + } + + private static String getProjectId() { + String projectId = System.getProperty(PROJECT_ENV_NAME, System.getenv(PROJECT_ENV_NAME)); + if (projectId == null) { + projectId = + System.getProperty(LEGACY_PROJECT_ENV_NAME, System.getenv(LEGACY_PROJECT_ENV_NAME)); + } + Preconditions.checkArgument(projectId != null, "A project ID is required."); + return projectId; + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/JobControllerClientTest.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/JobControllerClientTest.java new file mode 100644 index 000000000000..89b3939d2dd2 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/JobControllerClientTest.java @@ -0,0 +1,326 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.dataproc.v1beta2; + +import static com.google.cloud.dataproc.v1beta2.JobControllerClient.ListJobsPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@javax.annotation.Generated("by GAPIC") +public class JobControllerClientTest { + private static MockClusterController mockClusterController; + private static MockJobController mockJobController; + private static MockWorkflowTemplateService mockWorkflowTemplateService; + private static MockServiceHelper serviceHelper; + private JobControllerClient client; + private LocalChannelProvider channelProvider; + + @BeforeClass + public static void startStaticServer() { + mockClusterController = new MockClusterController(); + mockJobController = new MockJobController(); + mockWorkflowTemplateService = new MockWorkflowTemplateService(); + serviceHelper = + new MockServiceHelper( + "in-process-1", + Arrays.asList( + mockClusterController, mockJobController, mockWorkflowTemplateService)); + serviceHelper.start(); + } + + @AfterClass + public static void stopServer() { + serviceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + JobControllerSettings settings = + JobControllerSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = JobControllerClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + @SuppressWarnings("all") + public void submitJobTest() { + String driverOutputResourceUri = "driverOutputResourceUri-542229086"; + String driverControlFilesUri = "driverControlFilesUri207057643"; + Job expectedResponse = + Job.newBuilder() + .setDriverOutputResourceUri(driverOutputResourceUri) + .setDriverControlFilesUri(driverControlFilesUri) + .build(); + mockJobController.addResponse(expectedResponse); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + Job job = Job.newBuilder().build(); + + Job actualResponse = client.submitJob(projectId, region, job); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockJobController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SubmitJobRequest actualRequest = (SubmitJobRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(job, actualRequest.getJob()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + 
@SuppressWarnings("all") + public void submitJobExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockJobController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + Job job = Job.newBuilder().build(); + + client.submitJob(projectId, region, job); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void getJobTest() { + String driverOutputResourceUri = "driverOutputResourceUri-542229086"; + String driverControlFilesUri = "driverControlFilesUri207057643"; + Job expectedResponse = + Job.newBuilder() + .setDriverOutputResourceUri(driverOutputResourceUri) + .setDriverControlFilesUri(driverControlFilesUri) + .build(); + mockJobController.addResponse(expectedResponse); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String jobId = "jobId-1154752291"; + + Job actualResponse = client.getJob(projectId, region, jobId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockJobController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetJobRequest actualRequest = (GetJobRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(jobId, actualRequest.getJobId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void getJobExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockJobController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String jobId = "jobId-1154752291"; + + client.getJob(projectId, region, jobId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void listJobsTest() { + String nextPageToken = ""; + Job jobsElement = Job.newBuilder().build(); + List jobs = Arrays.asList(jobsElement); + ListJobsResponse expectedResponse = + ListJobsResponse.newBuilder().setNextPageToken(nextPageToken).addAllJobs(jobs).build(); + mockJobController.addResponse(expectedResponse); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + + ListJobsPagedResponse pagedListResponse = client.listJobs(projectId, region); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getJobsList().get(0), resources.get(0)); + + List actualRequests = mockJobController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListJobsRequest actualRequest = (ListJobsRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void listJobsExceptionTest() throws Exception { + StatusRuntimeException exception = 
new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockJobController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + + client.listJobs(projectId, region); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void cancelJobTest() { + String driverOutputResourceUri = "driverOutputResourceUri-542229086"; + String driverControlFilesUri = "driverControlFilesUri207057643"; + Job expectedResponse = + Job.newBuilder() + .setDriverOutputResourceUri(driverOutputResourceUri) + .setDriverControlFilesUri(driverControlFilesUri) + .build(); + mockJobController.addResponse(expectedResponse); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String jobId = "jobId-1154752291"; + + Job actualResponse = client.cancelJob(projectId, region, jobId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockJobController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CancelJobRequest actualRequest = (CancelJobRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(jobId, actualRequest.getJobId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void cancelJobExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockJobController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String jobId = "jobId-1154752291"; + + client.cancelJob(projectId, region, jobId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void deleteJobTest() { + Empty expectedResponse = Empty.newBuilder().build(); + mockJobController.addResponse(expectedResponse); + + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String jobId = "jobId-1154752291"; + + client.deleteJob(projectId, region, jobId); + + List actualRequests = mockJobController.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteJobRequest actualRequest = (DeleteJobRequest) actualRequests.get(0); + + Assert.assertEquals(projectId, actualRequest.getProjectId()); + Assert.assertEquals(region, actualRequest.getRegion()); + Assert.assertEquals(jobId, actualRequest.getJobId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void deleteJobExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockJobController.addException(exception); + + try { + String projectId = "projectId-1969970175"; + String region = "region-934795532"; + String jobId = "jobId-1154752291"; + + client.deleteJob(projectId, region, jobId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } +} diff --git 
a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterController.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterController.java new file mode 100644 index 000000000000..7f2b12ed892b --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterController.java @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.ServerServiceDefinition; +import java.util.List; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockClusterController implements MockGrpcService { + private final MockClusterControllerImpl serviceImpl; + + public MockClusterController() { + serviceImpl = new MockClusterControllerImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(GeneratedMessageV3 response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterControllerImpl.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterControllerImpl.java new file mode 100644 index 000000000000..5981c106734e --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockClusterControllerImpl.java @@ -0,0 +1,148 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.dataproc.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.cloud.dataproc.v1beta2.ClusterControllerGrpc.ClusterControllerImplBase; +import com.google.longrunning.Operation; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockClusterControllerImpl extends ClusterControllerImplBase { + private ArrayList requests; + private Queue responses; + + public MockClusterControllerImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(GeneratedMessageV3 response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createCluster( + CreateClusterRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext((Operation) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void updateCluster( + UpdateClusterRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext((Operation) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void deleteCluster( + DeleteClusterRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext((Operation) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void getCluster(GetClusterRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Cluster) { + requests.add(request); + responseObserver.onNext((Cluster) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void listClusters( + ListClustersRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof ListClustersResponse) { + requests.add(request); + responseObserver.onNext((ListClustersResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + 
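+ // A queued object that is neither the expected response type nor an Exception means the
+ // test primed the mock incorrectly; fail fast here rather than surface a
+ // ClassCastException later.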
responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void diagnoseCluster( + DiagnoseClusterRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext((Operation) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobController.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobController.java new file mode 100644 index 000000000000..304e10d06c24 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobController.java @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.ServerServiceDefinition; +import java.util.List; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockJobController implements MockGrpcService { + private final MockJobControllerImpl serviceImpl; + + public MockJobController() { + serviceImpl = new MockJobControllerImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(GeneratedMessageV3 response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobControllerImpl.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobControllerImpl.java new file mode 100644 index 000000000000..4a47fc125c3d --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockJobControllerImpl.java @@ -0,0 +1,143 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.dataproc.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.cloud.dataproc.v1beta2.JobControllerGrpc.JobControllerImplBase; +import com.google.protobuf.Empty; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockJobControllerImpl extends JobControllerImplBase { + private ArrayList requests; + private Queue responses; + + public MockJobControllerImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(GeneratedMessageV3 response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void submitJob(SubmitJobRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Job) { + requests.add(request); + responseObserver.onNext((Job) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void getJob(GetJobRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Job) { + requests.add(request); + responseObserver.onNext((Job) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void listJobs(ListJobsRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof ListJobsResponse) { + requests.add(request); + responseObserver.onNext((ListJobsResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void updateJob(UpdateJobRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Job) { + requests.add(request); + responseObserver.onNext((Job) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void cancelJob(CancelJobRequest request, StreamObserver responseObserver) 
{ + Object response = responses.remove(); + if (response instanceof Job) { + requests.add(request); + responseObserver.onNext((Job) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void deleteJob(DeleteJobRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext((Empty) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateService.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateService.java new file mode 100644 index 000000000000..7ff76b3dedd1 --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateService.java @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.dataproc.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.ServerServiceDefinition; +import java.util.List; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockWorkflowTemplateService implements MockGrpcService { + private final MockWorkflowTemplateServiceImpl serviceImpl; + + public MockWorkflowTemplateService() { + serviceImpl = new MockWorkflowTemplateServiceImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(GeneratedMessageV3 response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateServiceImpl.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateServiceImpl.java new file mode 100644 index 000000000000..8b3b52e17d6b --- /dev/null +++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/MockWorkflowTemplateServiceImpl.java @@ -0,0 +1,151 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.google.cloud.dataproc.v1beta2;
+
+import com.google.api.core.BetaApi;
+import com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceGrpc.WorkflowTemplateServiceImplBase;
+import com.google.longrunning.Operation;
+import com.google.protobuf.Empty;
+import com.google.protobuf.GeneratedMessageV3;
+import io.grpc.stub.StreamObserver;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+
+@javax.annotation.Generated("by GAPIC")
+@BetaApi
+public class MockWorkflowTemplateServiceImpl extends WorkflowTemplateServiceImplBase {
+  private ArrayList<GeneratedMessageV3> requests;
+  private Queue<Object> responses;
+
+  public MockWorkflowTemplateServiceImpl() {
+    requests = new ArrayList<>();
+    responses = new LinkedList<>();
+  }
+
+  public List<GeneratedMessageV3> getRequests() {
+    return requests;
+  }
+
+  public void addResponse(GeneratedMessageV3 response) {
+    responses.add(response);
+  }
+
+  public void setResponses(List<GeneratedMessageV3> responses) {
+    this.responses = new LinkedList<Object>(responses);
+  }
+
+  public void addException(Exception exception) {
+    responses.add(exception);
+  }
+
+  public void reset() {
+    requests = new ArrayList<>();
+    responses = new LinkedList<>();
+  }
+
+  @Override
+  public void createWorkflowTemplate(
+      CreateWorkflowTemplateRequest request, StreamObserver<WorkflowTemplate> responseObserver) {
+    Object response = responses.remove();
+    if (response instanceof WorkflowTemplate) {
+      requests.add(request);
+      responseObserver.onNext((WorkflowTemplate) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+
+  @Override
+  public void getWorkflowTemplate(
+      GetWorkflowTemplateRequest request, StreamObserver<WorkflowTemplate> responseObserver) {
+    Object response = responses.remove();
+    if (response instanceof WorkflowTemplate) {
+      requests.add(request);
+      responseObserver.onNext((WorkflowTemplate) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+
+  @Override
+  public void instantiateWorkflowTemplate(
+      InstantiateWorkflowTemplateRequest request, StreamObserver<Operation> responseObserver) {
+    Object response = responses.remove();
+    if (response instanceof Operation) {
+      requests.add(request);
+      responseObserver.onNext((Operation) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+
+  @Override
+  public void updateWorkflowTemplate(
+      UpdateWorkflowTemplateRequest request, StreamObserver<WorkflowTemplate> responseObserver) {
+    Object response = responses.remove();
+    if (response instanceof WorkflowTemplate) {
+      requests.add(request);
+      responseObserver.onNext((WorkflowTemplate) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+
+  @Override
+  public void listWorkflowTemplates(
+      ListWorkflowTemplatesRequest request,
+      StreamObserver<ListWorkflowTemplatesResponse> responseObserver) {
+    Object response = responses.remove();
+    if (response instanceof ListWorkflowTemplatesResponse) {
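+      // A matching response was queued by the test: record the request for later
+      // assertions, then replay the canned response to the client stub.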
+      requests.add(request);
+      responseObserver.onNext((ListWorkflowTemplatesResponse) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+
+  @Override
+  public void deleteWorkflowTemplate(
+      DeleteWorkflowTemplateRequest request, StreamObserver<Empty> responseObserver) {
+    Object response = responses.remove();
+    if (response instanceof Empty) {
+      requests.add(request);
+      responseObserver.onNext((Empty) response);
+      responseObserver.onCompleted();
+    } else if (response instanceof Exception) {
+      responseObserver.onError((Exception) response);
+    } else {
+      responseObserver.onError(new IllegalArgumentException("Unrecognized response type"));
+    }
+  }
+}
diff --git a/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClientTest.java b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClientTest.java
new file mode 100644
index 000000000000..f66fd0b8ccd0
--- /dev/null
+++ b/google-cloud-clients/google-cloud-dataproc/src/test/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceClientTest.java
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.dataproc.v1beta2;
+
+import static com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceClient.ListWorkflowTemplatesPagedResponse;
+
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.api.gax.grpc.GaxGrpcProperties;
+import com.google.api.gax.grpc.testing.LocalChannelProvider;
+import com.google.api.gax.grpc.testing.MockGrpcService;
+import com.google.api.gax.grpc.testing.MockServiceHelper;
+import com.google.api.gax.rpc.ApiClientHeaderProvider;
+import com.google.api.gax.rpc.InvalidArgumentException;
+import com.google.api.gax.rpc.StatusCode;
+import com.google.common.collect.Lists;
+import com.google.longrunning.Operation;
+import com.google.protobuf.Any;
+import com.google.protobuf.Empty;
+import com.google.protobuf.GeneratedMessageV3;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@javax.annotation.Generated("by GAPIC")
+public class WorkflowTemplateServiceClientTest {
+  private static MockClusterController mockClusterController;
+  private static MockJobController mockJobController;
+  private static MockWorkflowTemplateService mockWorkflowTemplateService;
+  private static MockServiceHelper serviceHelper;
+  private WorkflowTemplateServiceClient client;
+  private LocalChannelProvider channelProvider;
+
+  @BeforeClass
+  public static void startStaticServer() {
+    mockClusterController = new MockClusterController();
+    mockJobController = new MockJobController();
+    mockWorkflowTemplateService = new MockWorkflowTemplateService();
+    serviceHelper =
+        new MockServiceHelper(
+            "in-process-1",
+            Arrays.<MockGrpcService>asList(
+                mockClusterController, mockJobController, mockWorkflowTemplateService));
+    serviceHelper.start();
+  }
+
+  @AfterClass
+  public static void stopServer() {
+    serviceHelper.stop();
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    serviceHelper.reset();
+    channelProvider = serviceHelper.createChannelProvider();
+    WorkflowTemplateServiceSettings settings =
+        WorkflowTemplateServiceSettings.newBuilder()
+            .setTransportChannelProvider(channelProvider)
+            .setCredentialsProvider(NoCredentialsProvider.create())
+            .build();
+    client = WorkflowTemplateServiceClient.create(settings);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    client.close();
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void createWorkflowTemplateTest() {
+    String id = "id3355";
+    WorkflowTemplateName name =
+        WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+    int version = 351608024;
+    WorkflowTemplate expectedResponse =
+        WorkflowTemplate.newBuilder()
+            .setId(id)
+            .setName(name.toString())
+            .setVersion(version)
+            .build();
+    mockWorkflowTemplateService.addResponse(expectedResponse);
+
+    RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+    WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+
+    WorkflowTemplate actualResponse = client.createWorkflowTemplate(parent, template);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<GeneratedMessageV3> actualRequests = mockWorkflowTemplateService.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    CreateWorkflowTemplateRequest actualRequest =
+        (CreateWorkflowTemplateRequest) actualRequests.get(0);
+
+    Assert.assertEquals(parent, RegionName.parse(actualRequest.getParent()));
+    Assert.assertEquals(template, actualRequest.getTemplate());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void createWorkflowTemplateExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
+    mockWorkflowTemplateService.addException(exception);
+
+    try {
+      RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+      WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+
+      client.createWorkflowTemplate(parent, template);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception
+    }
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void getWorkflowTemplateTest() {
+    String id = "id3355";
+    WorkflowTemplateName name2 =
+        WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+    int version = 351608024;
+    WorkflowTemplate expectedResponse =
+        WorkflowTemplate.newBuilder()
+            .setId(id)
+            .setName(name2.toString())
+            .setVersion(version)
+            .build();
+    mockWorkflowTemplateService.addResponse(expectedResponse);
+
+    WorkflowTemplateName name =
+        WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+
+    WorkflowTemplate actualResponse = client.getWorkflowTemplate(name);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<GeneratedMessageV3> actualRequests = mockWorkflowTemplateService.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    GetWorkflowTemplateRequest actualRequest = (GetWorkflowTemplateRequest) actualRequests.get(0);
+
+    Assert.assertEquals(name, WorkflowTemplateName.parse(actualRequest.getName()));
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void getWorkflowTemplateExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
+    mockWorkflowTemplateService.addException(exception);
+
+    try {
+      WorkflowTemplateName name =
+          WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+
+      client.getWorkflowTemplate(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception
+    }
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void instantiateWorkflowTemplateTest() throws Exception {
+    Empty expectedResponse = Empty.newBuilder().build();
+    Operation resultOperation =
+        Operation.newBuilder()
+            .setName("instantiateWorkflowTemplateTest")
+            .setDone(true)
+            .setResponse(Any.pack(expectedResponse))
+            .build();
+    mockWorkflowTemplateService.addResponse(resultOperation);
+
+    WorkflowTemplateName name =
+        WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+    String instanceId = "instanceId-2101995259";
+
+    Empty actualResponse = client.instantiateWorkflowTemplateAsync(name, instanceId).get();
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<GeneratedMessageV3> actualRequests = mockWorkflowTemplateService.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    InstantiateWorkflowTemplateRequest actualRequest =
+        (InstantiateWorkflowTemplateRequest) actualRequests.get(0);
+
+    Assert.assertEquals(name, WorkflowTemplateName.parse(actualRequest.getName()));
+    Assert.assertEquals(instanceId, actualRequest.getInstanceId());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void instantiateWorkflowTemplateExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
+    mockWorkflowTemplateService.addException(exception);
+
+    try {
+      WorkflowTemplateName name =
+          WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+      String instanceId = "instanceId-2101995259";
+
+      client.instantiateWorkflowTemplateAsync(name, instanceId).get();
+      Assert.fail("No exception raised");
+    } catch (ExecutionException e) {
+      Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
+      InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
+      Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+    }
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void updateWorkflowTemplateTest() {
+    String id = "id3355";
+    WorkflowTemplateName name =
+        WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+    int version = 351608024;
+    WorkflowTemplate expectedResponse =
+        WorkflowTemplate.newBuilder()
+            .setId(id)
+            .setName(name.toString())
+            .setVersion(version)
+            .build();
+    mockWorkflowTemplateService.addResponse(expectedResponse);
+
+    WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+
+    WorkflowTemplate actualResponse = client.updateWorkflowTemplate(template);
+    Assert.assertEquals(expectedResponse, actualResponse);
+
+    List<GeneratedMessageV3> actualRequests = mockWorkflowTemplateService.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    UpdateWorkflowTemplateRequest actualRequest =
+        (UpdateWorkflowTemplateRequest) actualRequests.get(0);
+
+    Assert.assertEquals(template, actualRequest.getTemplate());
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void updateWorkflowTemplateExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
+    mockWorkflowTemplateService.addException(exception);
+
+    try {
+      WorkflowTemplate template = WorkflowTemplate.newBuilder().build();
+
+      client.updateWorkflowTemplate(template);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception
+    }
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void listWorkflowTemplatesTest() {
+    String nextPageToken = "";
+    WorkflowTemplate templatesElement = WorkflowTemplate.newBuilder().build();
+    List<WorkflowTemplate> templates = Arrays.asList(templatesElement);
+    ListWorkflowTemplatesResponse expectedResponse =
+        ListWorkflowTemplatesResponse.newBuilder()
+            .setNextPageToken(nextPageToken)
+            .addAllTemplates(templates)
+            .build();
+    mockWorkflowTemplateService.addResponse(expectedResponse);
+
+    RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+
+    ListWorkflowTemplatesPagedResponse pagedListResponse = client.listWorkflowTemplates(parent);
+
+    List<WorkflowTemplate> resources = Lists.newArrayList(pagedListResponse.iterateAll());
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(expectedResponse.getTemplatesList().get(0), resources.get(0));
+
+    List<GeneratedMessageV3> actualRequests = mockWorkflowTemplateService.getRequests();
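+    // The client should have sent exactly one ListWorkflowTemplatesRequest to the mock.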
+    Assert.assertEquals(1, actualRequests.size());
+    ListWorkflowTemplatesRequest actualRequest =
+        (ListWorkflowTemplatesRequest) actualRequests.get(0);
+
+    Assert.assertEquals(parent, RegionName.parse(actualRequest.getParent()));
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void listWorkflowTemplatesExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
+    mockWorkflowTemplateService.addException(exception);
+
+    try {
+      RegionName parent = RegionName.of("[PROJECT]", "[REGION]");
+
+      client.listWorkflowTemplates(parent);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception
+    }
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void deleteWorkflowTemplateTest() {
+    Empty expectedResponse = Empty.newBuilder().build();
+    mockWorkflowTemplateService.addResponse(expectedResponse);
+
+    WorkflowTemplateName name =
+        WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+
+    client.deleteWorkflowTemplate(name);
+
+    List<GeneratedMessageV3> actualRequests = mockWorkflowTemplateService.getRequests();
+    Assert.assertEquals(1, actualRequests.size());
+    DeleteWorkflowTemplateRequest actualRequest =
+        (DeleteWorkflowTemplateRequest) actualRequests.get(0);
+
+    Assert.assertEquals(name, WorkflowTemplateName.parse(actualRequest.getName()));
+    Assert.assertTrue(
+        channelProvider.isHeaderSent(
+            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
+            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
+  }
+
+  @Test
+  @SuppressWarnings("all")
+  public void deleteWorkflowTemplateExceptionTest() throws Exception {
+    StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
+    mockWorkflowTemplateService.addException(exception);
+
+    try {
+      WorkflowTemplateName name =
+          WorkflowTemplateName.of("[PROJECT]", "[REGION]", "[WORKFLOW_TEMPLATE]");
+
+      client.deleteWorkflowTemplate(name);
+      Assert.fail("No exception raised");
+    } catch (InvalidArgumentException e) {
+      // Expected exception
+    }
+  }
+}

From bd6452fd557819e5b8bd7c2dcbbdcf3322dc1b64 Mon Sep 17 00:00:00 2001
From: Garrett Jones
Date: Wed, 1 Aug 2018 11:25:56 -0700
Subject: [PATCH 2/2] Build files

---
 .../pom.xml                           | 31 +++++++++++++++++++
 google-api-grpc/pom.xml               | 12 +++++++
 .../pom.xml                           | 31 +++++++++++++++++++
 google-cloud-bom/pom.xml              | 10 ++++++
 .../google-cloud-dataproc/pom.xml     |  8 +++++
 google-cloud-clients/pom.xml          |  2 +-
 versions.txt                          |  2 ++
 7 files changed, 95 insertions(+), 1 deletion(-)
 create mode 100644 google-api-grpc/grpc-google-cloud-dataproc-v1beta2/pom.xml
 create mode 100644 google-api-grpc/proto-google-cloud-dataproc-v1beta2/pom.xml

diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/pom.xml b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/pom.xml
new file mode 100644
index 000000000000..d09eb9e7b952
--- /dev/null
+++ b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/pom.xml
@@ -0,0 +1,31 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>grpc-google-cloud-dataproc-v1beta2</artifactId>
+  <version>0.20.2-SNAPSHOT</version>
+  <name>grpc-google-cloud-dataproc-v1beta2</name>
+  <description>GRPC library for grpc-google-cloud-dataproc-v1beta2</description>
+  <parent>
+    <groupId>com.google.api.grpc</groupId>
+    <artifactId>google-api-grpc</artifactId>
+    <version>0.20.2-SNAPSHOT</version>
+  </parent>
+
+  <dependencies>
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-stub</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-protobuf</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-dataproc-v1beta2</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/google-api-grpc/pom.xml b/google-api-grpc/pom.xml
index 752a6ba6869c..375515914c5a 100644
--- a/google-api-grpc/pom.xml
+++ b/google-api-grpc/pom.xml
@@ -191,6 +191,16 @@
       <artifactId>grpc-google-cloud-dataproc-v1</artifactId>
       <version>0.20.2-SNAPSHOT</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-dataproc-v1beta2</artifactId>
+      <version>0.20.2-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>grpc-google-cloud-dataproc-v1beta2</artifactId>
+      <version>0.20.2-SNAPSHOT</version>
+    </dependency>
     <dependency>
       <groupId>com.google.api.grpc</groupId>
       <artifactId>proto-google-cloud-datastore-v1</artifactId>
@@ -546,6 +556,7 @@
     <module>grpc-google-cloud-bigtable-v2</module>
     <module>grpc-google-cloud-container-v1</module>
     <module>grpc-google-cloud-dataproc-v1</module>
+    <module>grpc-google-cloud-dataproc-v1beta2</module>
     <module>grpc-google-cloud-dialogflow-v2</module>
     <module>grpc-google-cloud-dialogflow-v2beta1</module>
     <module>grpc-google-cloud-dlp-v2</module>
@@ -587,6 +598,7 @@
     <module>proto-google-cloud-bigtable-v2</module>
     <module>proto-google-cloud-container-v1</module>
     <module>proto-google-cloud-dataproc-v1</module>
+    <module>proto-google-cloud-dataproc-v1beta2</module>
     <module>proto-google-cloud-datastore-v1</module>
     <module>proto-google-cloud-dialogflow-v2</module>
     <module>proto-google-cloud-dialogflow-v2beta1</module>
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/pom.xml b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/pom.xml
new file mode 100644
index 000000000000..d927b2526354
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/pom.xml
@@ -0,0 +1,31 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>proto-google-cloud-dataproc-v1beta2</artifactId>
+  <version>0.20.2-SNAPSHOT</version>
+  <name>proto-google-cloud-dataproc-v1beta2</name>
+  <description>PROTO library for proto-google-cloud-dataproc-v1beta2</description>
+  <parent>
+    <groupId>com.google.api.grpc</groupId>
+    <artifactId>google-api-grpc</artifactId>
+    <version>0.20.2-SNAPSHOT</version>
+  </parent>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api</groupId>
+      <artifactId>api-common</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-common-protos</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/google-cloud-bom/pom.xml b/google-cloud-bom/pom.xml
index 93ca0f8eb072..15f61256b309 100644
--- a/google-cloud-bom/pom.xml
+++ b/google-cloud-bom/pom.xml
@@ -417,6 +417,16 @@
         <artifactId>grpc-google-cloud-dataproc-v1</artifactId>
         <version>0.20.2-SNAPSHOT</version>
       </dependency>
+      <dependency>
+        <groupId>com.google.api.grpc</groupId>
+        <artifactId>proto-google-cloud-dataproc-v1beta2</artifactId>
+        <version>0.20.2-SNAPSHOT</version>
+      </dependency>
+      <dependency>
+        <groupId>com.google.api.grpc</groupId>
+        <artifactId>grpc-google-cloud-dataproc-v1beta2</artifactId>
+        <version>0.20.2-SNAPSHOT</version>
+      </dependency>
       <dependency>
         <groupId>com.google.cloud</groupId>
         <artifactId>google-cloud-datastore</artifactId>
diff --git a/google-cloud-clients/google-cloud-dataproc/pom.xml b/google-cloud-clients/google-cloud-dataproc/pom.xml
index 2a396e703587..059a99f38ca8 100644
--- a/google-cloud-clients/google-cloud-dataproc/pom.xml
+++ b/google-cloud-clients/google-cloud-dataproc/pom.xml
@@ -34,6 +34,14 @@
       <groupId>com.google.api.grpc</groupId>
       <artifactId>grpc-google-cloud-dataproc-v1</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-dataproc-v1beta2</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>grpc-google-cloud-dataproc-v1beta2</artifactId>
+    </dependency>
     <dependency>
       <groupId>io.grpc</groupId>
       <artifactId>grpc-netty-shaded</artifactId>
diff --git a/google-cloud-clients/pom.xml b/google-cloud-clients/pom.xml
index 235cf8ac1aee..654319ab1529 100644
--- a/google-cloud-clients/pom.xml
+++ b/google-cloud-clients/pom.xml
@@ -755,7 +755,7 @@
            </group>
            <group>
              <title>Stub packages</title>
-              <packages>com.google.cloud.automl.v1beta1.stub:com.google.cloud.bigquery.datatransfer.v1.stub:com.google.cloud.bigtable.admin.v2.stub:com.google.cloud.bigtable.data.v2.stub*:com.google.cloud.compute.v1.stub:com.google.cloud.container.v1.stub:com.google.cloud.dataproc.v1.stub:com.google.cloud.dlp.v2beta1.stub:com.google.cloud.dlp.v2.stub:com.google.cloud.dialogflow.v2beta1.stub:com.google.cloud.dialogflow.v2.stub:com.google.cloud.errorreporting.v1beta1.stub:com.google.cloud.firestore.v1beta1.stub:com.google.cloud.iot.v1.stub:com.google.cloud.kms.v1.stub:com.google.cloud.language.v1beta2.stub:com.google.cloud.language.v1.stub:com.google.cloud.logging.v2.stub:com.google.cloud.monitoring.v3.stub:com.google.cloud.oslogin.v1.stub:com.google.cloud.pubsub.v1.stub:com.google.cloud.redis.v1beta1.stub:com.google.cloud.spanner.admin.database.v1.stub:com.google.cloud.spanner.admin.instance.v1.stub:com.google.cloud.spanner.v1.stub:com.google.cloud.speech.v1beta1.stub:com.google.cloud.speech.v1p1beta1.stub:com.google.cloud.speech.v1.stub:com.google.cloud.tasks.v2beta2.stub:com.google.cloud.texttospeech.v1beta1.stub:com.google.cloud.texttospeech.v1.stub:com.google.cloud.trace.v1.stub:com.google.cloud.trace.v2.stub:com.google.cloud.videointelligence.v1beta1.stub:com.google.cloud.videointelligence.v1beta2.stub:com.google.cloud.videointelligence.v1.stub:com.google.cloud.videointelligence.v1p1beta1.stub:com.google.cloud.vision.v1.stub:com.google.cloud.vision.v1p1beta1.stub:com.google.cloud.vision.v1p2beta1.stub:com.google.cloud.vision.v1p3beta1.stub:com.google.cloud.websecurityscanner.v1alpha.stub</packages>
+              <packages>com.google.cloud.automl.v1beta1.stub:com.google.cloud.bigquery.datatransfer.v1.stub:com.google.cloud.bigtable.admin.v2.stub:com.google.cloud.bigtable.data.v2.stub*:com.google.cloud.compute.v1.stub:com.google.cloud.container.v1.stub:com.google.cloud.dataproc.v1.stub:com.google.cloud.dataproc.v1beta2.stub:com.google.cloud.dlp.v2beta1.stub:com.google.cloud.dlp.v2.stub:com.google.cloud.dialogflow.v2beta1.stub:com.google.cloud.dialogflow.v2.stub:com.google.cloud.errorreporting.v1beta1.stub:com.google.cloud.firestore.v1beta1.stub:com.google.cloud.iot.v1.stub:com.google.cloud.kms.v1.stub:com.google.cloud.language.v1beta2.stub:com.google.cloud.language.v1.stub:com.google.cloud.logging.v2.stub:com.google.cloud.monitoring.v3.stub:com.google.cloud.oslogin.v1.stub:com.google.cloud.pubsub.v1.stub:com.google.cloud.redis.v1beta1.stub:com.google.cloud.spanner.admin.database.v1.stub:com.google.cloud.spanner.admin.instance.v1.stub:com.google.cloud.spanner.v1.stub:com.google.cloud.speech.v1beta1.stub:com.google.cloud.speech.v1p1beta1.stub:com.google.cloud.speech.v1.stub:com.google.cloud.tasks.v2beta2.stub:com.google.cloud.texttospeech.v1beta1.stub:com.google.cloud.texttospeech.v1.stub:com.google.cloud.trace.v1.stub:com.google.cloud.trace.v2.stub:com.google.cloud.videointelligence.v1beta1.stub:com.google.cloud.videointelligence.v1beta2.stub:com.google.cloud.videointelligence.v1.stub:com.google.cloud.videointelligence.v1p1beta1.stub:com.google.cloud.vision.v1.stub:com.google.cloud.vision.v1p1beta1.stub:com.google.cloud.vision.v1p2beta1.stub:com.google.cloud.vision.v1p3beta1.stub:com.google.cloud.websecurityscanner.v1alpha.stub</packages>
            </group>
            <group>
              <title>Deprecated packages</title>
diff --git a/versions.txt b/versions.txt
index 4f206c84c4c8..18c6d5672fb3 100644
--- a/versions.txt
+++ b/versions.txt
@@ -14,6 +14,7 @@ grpc-google-cloud-bigtable-admin-v2:0.20.1:0.20.2-SNAPSHOT
 grpc-google-cloud-bigtable-v2:0.20.1:0.20.2-SNAPSHOT
 grpc-google-cloud-container-v1:0.20.1:0.20.2-SNAPSHOT
 grpc-google-cloud-dataproc-v1:0.20.1:0.20.2-SNAPSHOT
+grpc-google-cloud-dataproc-v1beta2:0.20.1:0.20.2-SNAPSHOT
 grpc-google-cloud-dialogflow-v2:0.20.1:0.20.2-SNAPSHOT
 grpc-google-cloud-dialogflow-v2beta1:0.20.1:0.20.2-SNAPSHOT
 grpc-google-cloud-dlp-v2:0.20.1:0.20.2-SNAPSHOT
@@ -55,6 +56,7 @@ proto-google-cloud-bigtable-admin-v2:0.20.1:0.20.2-SNAPSHOT
 proto-google-cloud-bigtable-v2:0.20.1:0.20.2-SNAPSHOT
 proto-google-cloud-container-v1:0.20.1:0.20.2-SNAPSHOT
 proto-google-cloud-dataproc-v1:0.20.1:0.20.2-SNAPSHOT
+proto-google-cloud-dataproc-v1beta2:0.20.1:0.20.2-SNAPSHOT
 proto-google-cloud-datastore-v1:0.20.1:0.20.2-SNAPSHOT
 proto-google-cloud-dialogflow-v2:0.20.1:0.20.2-SNAPSHOT
 proto-google-cloud-dialogflow-v2beta1:0.20.1:0.20.2-SNAPSHOT
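
For reviewers, a minimal usage sketch of the new v1beta2 surface, mirroring the calls
exercised by WorkflowTemplateServiceClientTest above. This is not generated code: the
project ID, region, and template ID are placeholder values, WorkflowTemplateServiceClient.create()
is assumed to pick up application-default credentials, and a template accepted by the live
service would also need its placement and jobs populated before instantiation.

  import com.google.cloud.dataproc.v1beta2.RegionName;
  import com.google.cloud.dataproc.v1beta2.WorkflowTemplate;
  import com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceClient;

  public class WorkflowTemplateQuickstart {
    public static void main(String[] args) throws Exception {
      // Placeholder project and region.
      RegionName parent = RegionName.of("my-project", "global");
      // create() uses application-default credentials; the try-with-resources
      // block shuts the client's channel down when done.
      try (WorkflowTemplateServiceClient client = WorkflowTemplateServiceClient.create()) {
        // A deliberately minimal template; real templates also set placement and jobs.
        WorkflowTemplate template = WorkflowTemplate.newBuilder().setId("my-template").build();
        WorkflowTemplate created = client.createWorkflowTemplate(parent, template);
        System.out.println("Created: " + created.getName());
        // Page transparently through every template in the region.
        for (WorkflowTemplate t : client.listWorkflowTemplates(parent).iterateAll()) {
          System.out.println(t.getId());
        }
      }
    }
  }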