Update a Kafka cluster

Update a Kafka cluster

Explore further

For detailed documentation that includes this code sample, see the following:

Code sample

Go

Before trying this sample, follow the Go setup instructions in the Managed Service for Apache Kafka quickstart using client libraries. For more information, see the Managed Service for Apache Kafka Go API reference documentation.

To authenticate to Managed Service for Apache Kafka, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

  import 
  
 ( 
  
 "context" 
  
 "fmt" 
  
 "io" 
  
 "cloud.google.com/go/managedkafka/apiv1/managedkafkapb" 
  
 "google.golang.org/api/option" 
  
 "google.golang.org/protobuf/types/known/fieldmaskpb" 
  
 managedkafka 
  
 "cloud.google.com/go/managedkafka/apiv1" 
 ) 
 func 
  
 updateCluster 
 ( 
 w 
  
 io 
 . 
 Writer 
 , 
  
 projectID 
 , 
  
 region 
 , 
  
 clusterID 
  
 string 
 , 
  
 memory 
  
 int64 
 , 
  
 opts 
  
 ... 
 option 
 . 
 ClientOption 
 ) 
  
 error 
  
 { 
  
 // projectID := "my-project-id" 
  
 // region := "us-central1" 
  
 // clusterID := "my-cluster" 
  
 // memoryBytes := 4221225472 
  
 ctx 
  
 := 
  
 context 
 . 
 Background 
 () 
  
 client 
 , 
  
 err 
  
 := 
  
 managedkafka 
 . 
  NewClient 
 
 ( 
 ctx 
 , 
  
 opts 
 ... 
 ) 
  
 if 
  
 err 
  
 != 
  
 nil 
  
 { 
  
 return 
  
 fmt 
 . 
 Errorf 
 ( 
 "managedkafka.NewClient got err: %w" 
 , 
  
 err 
 ) 
  
 } 
  
 defer 
  
 client 
 . 
 Close 
 () 
  
 clusterPath 
  
 := 
  
 fmt 
 . 
 Sprintf 
 ( 
 "projects/%s/locations/%s/clusters/%s" 
 , 
  
 projectID 
 , 
  
 region 
 , 
  
 clusterID 
 ) 
  
 capacityConfig 
  
 := 
  
& managedkafkapb 
 . 
 CapacityConfig 
 { 
  
 MemoryBytes 
 : 
  
 memory 
 , 
  
 } 
  
 cluster 
  
 := 
  
& managedkafkapb 
 . 
 Cluster 
 { 
  
 Name 
 : 
  
 clusterPath 
 , 
  
 CapacityConfig 
 : 
  
 capacityConfig 
 , 
  
 } 
  
 paths 
  
 := 
  
 [] 
 string 
 { 
 "capacity_config.memory_bytes" 
 } 
  
 updateMask 
  
 := 
  
& fieldmaskpb 
 . 
 FieldMask 
 { 
  
 Paths 
 : 
  
 paths 
 , 
  
 } 
  
 req 
  
 := 
  
& managedkafkapb 
 . 
 UpdateClusterRequest 
 { 
  
 UpdateMask 
 : 
  
 updateMask 
 , 
  
 Cluster 
 : 
  
 cluster 
 , 
  
 } 
  
 op 
 , 
  
 err 
  
 := 
  
 client 
 . 
 UpdateCluster 
 ( 
 ctx 
 , 
  
 req 
 ) 
  
 if 
  
 err 
  
 != 
  
 nil 
  
 { 
  
 return 
  
 fmt 
 . 
 Errorf 
 ( 
 "client.UpdateCluster got err: %w" 
 , 
  
 err 
 ) 
  
 } 
  
 resp 
 , 
  
 err 
  
 := 
  
 op 
 . 
 Wait 
 ( 
 ctx 
 ) 
  
 if 
  
 err 
  
 != 
  
 nil 
  
 { 
  
 return 
  
 fmt 
 . 
 Errorf 
 ( 
 "op.Wait got err: %w" 
 , 
  
 err 
 ) 
  
 } 
  
 fmt 
 . 
 Fprintf 
 ( 
 w 
 , 
  
 "Updated cluster: %#v\n" 
 , 
  
 resp 
 ) 
  
 return 
  
 nil 
 } 
 

Java

Before trying this sample, follow the Java setup instructions in the Managed Service for Apache Kafka quickstart using client libraries. For more information, see the Managed Service for Apache Kafka Java API reference documentation.

To authenticate to Managed Service for Apache Kafka, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

  import 
  
 com.google.api.gax.longrunning. OperationFuture 
 
 ; 
 import 
  
 com.google.api.gax.longrunning. OperationSnapshot 
 
 ; 
 import 
  
 com.google.api.gax.longrunning. OperationTimedPollAlgorithm 
 
 ; 
 import 
  
 com.google.api.gax.retrying. RetrySettings 
 
 ; 
 import 
  
 com.google.api.gax.retrying. TimedRetryAlgorithm 
 
 ; 
 import 
  
 com.google.cloud.managedkafka.v1. CapacityConfig 
 
 ; 
 import 
  
 com.google.cloud.managedkafka.v1. Cluster 
 
 ; 
 import 
  
 com.google.cloud.managedkafka.v1. ClusterName 
 
 ; 
 import 
  
 com.google.cloud.managedkafka.v1. ManagedKafkaClient 
 
 ; 
 import 
  
 com.google.cloud.managedkafka.v1. ManagedKafkaSettings 
 
 ; 
 import 
  
 com.google.cloud.managedkafka.v1. OperationMetadata 
 
 ; 
 import 
  
 com.google.cloud.managedkafka.v1. UpdateClusterRequest 
 
 ; 
 import 
  
 com.google.protobuf. FieldMask 
 
 ; 
 import 
  
 java.time. Duration 
 
 ; 
 import 
  
 java.util.concurrent.ExecutionException 
 ; 
 public 
  
 class 
 UpdateCluster 
  
 { 
  
 public 
  
 static 
  
 void 
  
 main 
 ( 
 String 
 [] 
  
 args 
 ) 
  
 throws 
  
 Exception 
  
 { 
  
 // TODO(developer): Replace these variables before running the example. 
  
 String 
  
 projectId 
  
 = 
  
 "my-project-id" 
 ; 
  
 String 
  
 region 
  
 = 
  
 "my-region" 
 ; 
  
 // e.g. us-east1 
  
 String 
  
 clusterId 
  
 = 
  
 "my-cluster" 
 ; 
  
 long 
  
 memoryBytes 
  
 = 
  
 4221225472L 
 ; 
  
 // 4 GiB 
  
 updateCluster 
 ( 
 projectId 
 , 
  
 region 
 , 
  
 clusterId 
 , 
  
 memoryBytes 
 ); 
  
 } 
  
 public 
  
 static 
  
 void 
  
 updateCluster 
 ( 
  
 String 
  
 projectId 
 , 
  
 String 
  
 region 
 , 
  
 String 
  
 clusterId 
 , 
  
 long 
  
 memoryBytes 
 ) 
  
 throws 
  
 Exception 
  
 { 
  
  CapacityConfig 
 
  
 capacityConfig 
  
 = 
  
  CapacityConfig 
 
 . 
 newBuilder 
 (). 
  setMemoryBytes 
 
 ( 
 memoryBytes 
 ). 
 build 
 (); 
  
  Cluster 
 
  
 cluster 
  
 = 
  
  Cluster 
 
 . 
 newBuilder 
 () 
  
 . 
 setName 
 ( 
  ClusterName 
 
 . 
 of 
 ( 
 projectId 
 , 
  
 region 
 , 
  
 clusterId 
 ). 
 toString 
 ()) 
  
 . 
 setCapacityConfig 
 ( 
 capacityConfig 
 ) 
  
 . 
 build 
 (); 
  
  FieldMask 
 
  
 updateMask 
  
 = 
  
  FieldMask 
 
 . 
 newBuilder 
 (). 
  addPaths 
 
 ( 
 "capacity_config.memory_bytes" 
 ). 
 build 
 (); 
  
 // Create the settings to configure the timeout for polling operations 
  
  ManagedKafkaSettings 
 
 . 
 Builder 
  
 settingsBuilder 
  
 = 
  
  ManagedKafkaSettings 
 
 . 
 newBuilder 
 (); 
  
  TimedRetryAlgorithm 
 
  
 timedRetryAlgorithm 
  
 = 
  
  OperationTimedPollAlgorithm 
 
 . 
 create 
 ( 
  
  RetrySettings 
 
 . 
 newBuilder 
 () 
  
 . 
  setTotalTimeoutDuration 
 
 ( 
  Duration 
 
 . 
 ofHours 
 ( 
 1L 
 )) 
  
 . 
 build 
 ()); 
  
 settingsBuilder 
 . 
 updateClusterOperationSettings 
 () 
  
 . 
 setPollingAlgorithm 
 ( 
 timedRetryAlgorithm 
 ); 
  
 try 
  
 ( 
  ManagedKafkaClient 
 
  
 managedKafkaClient 
  
 = 
  
  ManagedKafkaClient 
 
 . 
 create 
 ( 
  
 settingsBuilder 
 . 
 build 
 ())) 
  
 { 
  
  UpdateClusterRequest 
 
  
 request 
  
 = 
  
  UpdateClusterRequest 
 
 . 
 newBuilder 
 (). 
 setUpdateMask 
 ( 
 updateMask 
 ). 
 setCluster 
 ( 
 cluster 
 ). 
 build 
 (); 
  
 OperationFuture<Cluster 
 , 
  
 OperationMetadata 
>  
 future 
  
 = 
  
 managedKafkaClient 
 . 
  updateClusterOperationCallable 
 
 (). 
 futureCall 
 ( 
 request 
 ); 
  
 // Get the initial LRO and print details. CreateCluster contains sample code for polling logs. 
  
  OperationSnapshot 
 
  
 operation 
  
 = 
  
 future 
 . 
 getInitialFuture 
 (). 
 get 
 (); 
  
 System 
 . 
 out 
 . 
 printf 
 ( 
 "Cluster update started. Operation name: %s\nDone: %s\nMetadata: %s\n" 
 , 
  
 operation 
 . 
  getName 
 
 (), 
  
 operation 
 . 
  isDone 
 
 (), 
  
 future 
 . 
 getMetadata 
 (). 
 get 
 (). 
 toString 
 ()); 
  
  Cluster 
 
  
 response 
  
 = 
  
 future 
 . 
 get 
 (); 
  
 System 
 . 
 out 
 . 
 printf 
 ( 
 "Updated cluster: %s\n" 
 , 
  
 response 
 . 
  getName 
 
 ()); 
  
 } 
  
 catch 
  
 ( 
 ExecutionException 
  
 e 
 ) 
  
 { 
  
 System 
 . 
 err 
 . 
 printf 
 ( 
 "managedKafkaClient.updateCluster got err: %s" 
 , 
  
 e 
 . 
 getMessage 
 ()); 
  
 } 
  
 } 
 } 
 

Python

Before trying this sample, follow the Python setup instructions in the Managed Service for Apache Kafka quickstart using client libraries. For more information, see the Managed Service for Apache Kafka Python API reference documentation.

To authenticate to Managed Service for Apache Kafka, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

  from 
  
 google.api_core.exceptions 
  
 import 
 GoogleAPICallError 
 from 
  
 google.cloud 
  
 import 
  managedkafka_v1 
 
 from 
  
 google.protobuf 
  
 import 
 field_mask_pb2 
 # TODO(developer) 
 # project_id = "my-project-id" 
 # region = "us-central1" 
 # cluster_id = "my-cluster" 
 # memory_bytes = 4295000000 
 client 
 = 
  managedkafka_v1 
 
 . 
  ManagedKafkaClient 
 
 () 
 cluster 
 = 
  managedkafka_v1 
 
 . 
  Cluster 
 
 () 
 cluster 
 . 
 name 
 = 
 client 
 . 
  cluster_path 
 
 ( 
 project_id 
 , 
 region 
 , 
 cluster_id 
 ) 
 cluster 
 . 
 capacity_config 
 . 
 memory_bytes 
 = 
 memory_bytes 
 update_mask 
 = 
 field_mask_pb2 
 . 
 FieldMask 
 () 
 update_mask 
 . 
 paths 
 . 
 append 
 ( 
 "capacity_config.memory_bytes" 
 ) 
 # For a list of editable fields, one can check https://cloud.google.com/managed-kafka/docs/create-cluster#properties. 
 request 
 = 
  managedkafka_v1 
 
 . 
  UpdateClusterRequest 
 
 ( 
 update_mask 
 = 
 update_mask 
 , 
 cluster 
 = 
 cluster 
 , 
 ) 
 try 
 : 
 operation 
 = 
 client 
 . 
  update_cluster 
 
 ( 
 request 
 = 
 request 
 ) 
 print 
 ( 
 f 
 "Waiting for operation 
 { 
 operation 
 . 
 operation 
 . 
 name 
 } 
 to complete..." 
 ) 
 response 
 = 
 operation 
 . 
 result 
 () 
 print 
 ( 
 "Updated cluster:" 
 , 
 response 
 ) 
 except 
 GoogleAPICallError 
 as 
 e 
 : 
 print 
 ( 
 f 
 "The operation failed with error: 
 { 
 e 
 . 
 message 
 } 
 " 
 ) 
 

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.

Create a Mobile Website
View Site in Mobile | Classic
Share by: