Skip to content

Commit 6b95655

Browse files
Google APIs copybara-github
authored and committed
feat: add optional parameters (tarball_gcs_dir, diagnosis_interval, jobs, yarn_application_ids) in DiagnoseClusterRequest
PiperOrigin-RevId: 565501215
1 parent ae05e3b commit 6b95655

2 files changed

Lines changed: 40 additions & 19 deletions

File tree

google/cloud/dataproc/v1/BUILD.bazel

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,10 +36,12 @@ proto_library(
3636
"//google/api:field_behavior_proto",
3737
"//google/api:resource_proto",
3838
"//google/longrunning:operations_proto",
39+
"//google/type:interval_proto",
3940
"@com_google_protobuf//:duration_proto",
4041
"@com_google_protobuf//:empty_proto",
4142
"@com_google_protobuf//:field_mask_proto",
4243
"@com_google_protobuf//:timestamp_proto",
44+
"@com_google_protobuf//:wrappers_proto",
4345
],
4446
)
4547

@@ -144,6 +146,7 @@ go_proto_library(
144146
deps = [
145147
"//google/api:annotations_go_proto",
146148
"//google/longrunning:longrunning_go_proto",
149+
"//google/type:interval_go_proto",
147150
],
148151
)
149152

@@ -354,7 +357,6 @@ load(
354357

355358
csharp_proto_library(
356359
name = "dataproc_csharp_proto",
357-
extra_opts = [],
358360
deps = [":dataproc_proto"],
359361
)
360362

google/cloud/dataproc/v1/clusters.proto

Lines changed: 37 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,8 @@ import "google/longrunning/operations.proto";
2525
import "google/protobuf/duration.proto";
2626
import "google/protobuf/field_mask.proto";
2727
import "google/protobuf/timestamp.proto";
28+
import "google/protobuf/wrappers.proto";
29+
import "google/type/interval.proto";
2830

2931
option go_package = "cloud.google.com/go/dataproc/v2/apiv1/dataprocpb;dataprocpb";
3032
option java_multiple_files = true;
@@ -671,22 +673,20 @@ message InstanceGroupConfig {
671673
// Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
672674
string min_cpu_platform = 9 [(google.api.field_behavior) = OPTIONAL];
673675

674-
// Optional. The minimum number of instances to create.
675-
// If min_num_instances is set, min_num_instances is used for a criteria to
676-
// decide the cluster. Cluster creation will be failed by being an error state
677-
// if the total number of instances created is less than the
678-
// min_num_instances.
679-
// For example, given that num_instances = 5 and min_num_instances = 3,
680-
// * if 4 instances are created and then registered successfully but one
681-
// instance is failed, the failed VM will be deleted and the cluster will be
682-
// resized to 4 instances in running state.
683-
// * if 2 instances are created successfully and 3 instances are failed,
684-
// the cluster will be in an error state and does not delete failed VMs for
685-
// debugging.
686-
// * if 2 instance are created and then registered successfully but 3
687-
// instances are failed to initialize, the cluster will be in an error state
688-
// and does not delete failed VMs for debugging.
689-
// NB: This can only be set for primary workers now.
676+
// Optional. The minimum number of primary worker instances to create.
677+
// If `min_num_instances` is set, cluster creation will succeed if
678+
// the number of primary workers created is at least equal to the
679+
// `min_num_instances` number.
680+
//
681+
// Example: Cluster creation request with `num_instances` = `5` and
682+
// `min_num_instances` = `3`:
683+
//
684+
// * If 4 VMs are created and 1 instance fails,
685+
// the failed VM is deleted. The cluster is
686+
// resized to 4 instances and placed in a `RUNNING` state.
687+
// * If 2 instances are created and 3 instances fail,
688+
// the cluster is placed in an `ERROR` state. The failed VMs
689+
// are not deleted.
690690
int32 min_num_instances = 12 [(google.api.field_behavior) = OPTIONAL];
691691

692692
// Optional. Instance flexibility Policy allowing a mixture of VM shapes and
@@ -843,12 +843,12 @@ message NodeGroup {
843843
pattern: "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}"
844844
};
845845

846-
// Node group roles.
846+
// Node pool roles.
847847
enum Role {
848848
// Required unspecified role.
849849
ROLE_UNSPECIFIED = 0;
850850

851-
// Job drivers run on the node group.
851+
// Job drivers run on the node pool.
852852
DRIVER = 1;
853853
}
854854

@@ -1531,6 +1531,25 @@ message DiagnoseClusterRequest {
15311531

15321532
// Required. The cluster name.
15331533
string cluster_name = 2 [(google.api.field_behavior) = REQUIRED];
1534+
1535+
// Optional. The output Cloud Storage directory for the diagnostic
1536+
// tarball. If not specified, a task-specific directory in the cluster's
1537+
// staging bucket will be used.
1538+
string tarball_gcs_dir = 4 [(google.api.field_behavior) = OPTIONAL];
1539+
1540+
// Optional. Time interval in which diagnosis should be carried out on the
1541+
// cluster.
1542+
google.type.Interval diagnosis_interval = 6
1543+
[(google.api.field_behavior) = OPTIONAL];
1544+
1545+
// Optional. Specifies a list of jobs on which diagnosis is to be performed.
1546+
// Format: projects/{project}/regions/{region}/jobs/{job}
1547+
repeated string jobs = 10 [(google.api.field_behavior) = OPTIONAL];
1548+
1549+
// Optional. Specifies a list of yarn applications on which diagnosis is to be
1550+
// performed.
1551+
repeated string yarn_application_ids = 11
1552+
[(google.api.field_behavior) = OPTIONAL];
15341553
}
15351554

15361555
// The location of diagnostic output.

0 commit comments

Comments (0)