Generate text with safety settings

This sample demonstrates how to apply safety settings when generating text with the Gemini model.


Code sample

Go

Before trying this sample, follow the Go setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Go API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
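The sample below does not set a project or location in code, so the client falls back to its environment-based defaults. If you prefer to configure these explicitly, here is a minimal sketch (not part of the official sample) that assumes the Project, Location, and Backend fields of genai.ClientConfig in google.golang.org/genai; replace the placeholder values with your own project ID and region.

    // Hypothetical explicit configuration; the official sample relies on
    // Application Default Credentials and environment-based settings instead.
    client, err := genai.NewClient(ctx, &genai.ClientConfig{
    	Backend:  genai.BackendVertexAI,
    	Project:  "your-project-id", // placeholder
    	Location: "us-central1",     // placeholder
    })
    if err != nil {
    	return fmt.Errorf("failed to create genai client: %w", err)
    }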

import (
	"context"
	"fmt"
	"io"

	"google.golang.org/genai"
)

// generateTextWithSafety shows how to apply safety settings to a text generation request.
func generateTextWithSafety(w io.Writer) error {
	ctx := context.Background()

	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		HTTPOptions: genai.HTTPOptions{APIVersion: "v1"},
	})
	if err != nil {
		return fmt.Errorf("failed to create genai client: %w", err)
	}

	systemInstruction := &genai.Content{
		Parts: []*genai.Part{
			{Text: "Be as mean as possible."},
		},
		Role: "user",
	}

	prompt := "Write a list of 5 disrespectful things that I might say to the universe after stubbing my toe in the dark."

	safetySettings := []*genai.SafetySetting{
		{Category: genai.HarmCategoryDangerousContent, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
		{Category: genai.HarmCategoryHarassment, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
		{Category: genai.HarmCategoryHateSpeech, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
		{Category: genai.HarmCategorySexuallyExplicit, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
	}

	config := &genai.GenerateContentConfig{
		SystemInstruction: systemInstruction,
		SafetySettings:    safetySettings,
	}

	modelName := "gemini-2.5-flash"

	resp, err := client.Models.GenerateContent(ctx, modelName,
		[]*genai.Content{{Parts: []*genai.Part{{Text: prompt}}, Role: "user"}},
		config,
	)
	if err != nil {
		return fmt.Errorf("failed to generate content: %w", err)
	}

	fmt.Fprintln(w, resp.Text())

	if len(resp.Candidates) > 0 {
		fmt.Fprintln(w, "Finish Reason:", resp.Candidates[0].FinishReason)
		for _, rating := range resp.Candidates[0].SafetyRatings {
			fmt.Fprintf(w, "\nCategory: %v\nIs Blocked: %v\nProbability: %v\nProbability Score: %v\nSeverity: %v\nSeverity Score: %v\n",
				rating.Category,
				rating.Blocked,
				rating.Probability,
				rating.ProbabilityScore,
				rating.Severity,
				rating.SeverityScore,
			)
		}
	}
	// Example response:
	// Category: HARM_CATEGORY_HATE_SPEECH
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 8.996795e-06
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0.04771039
	//
	// Category: HARM_CATEGORY_DANGEROUS_CONTENT
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 2.2431707e-06
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0
	//
	// Category: HARM_CATEGORY_HARASSMENT
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 0.00026123362
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0.022358216
	//
	// Category: HARM_CATEGORY_SEXUALLY_EXPLICIT
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 6.1352006e-07
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0.020111412

	return nil
}

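For reference, here is a minimal, hypothetical way to run this function locally, assuming the snippet above is saved in the same package as your program's entry point (this wrapper is not part of the official sample):

    package main

    import (
    	"log"
    	"os"
    )

    func main() {
    	// Writes the model response, finish reason, and safety ratings to stdout.
    	if err := generateTextWithSafety(os.Stdout); err != nil {
    		log.Fatal(err)
    	}
    }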
Java

Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

import com.google.genai.Client;
import com.google.genai.types.Candidate;
import com.google.genai.types.Content;
import com.google.genai.types.GenerateContentConfig;
import com.google.genai.types.GenerateContentResponse;
import com.google.genai.types.HarmBlockThreshold;
import com.google.genai.types.HarmCategory;
import com.google.genai.types.HttpOptions;
import com.google.genai.types.Part;
import com.google.genai.types.SafetySetting;
import java.util.List;
import java.util.stream.Collectors;

public class SafetyWithTxt {

  public static void main(String[] args) {
    // TODO(developer): Replace these variables before running the sample.
    String modelId = "gemini-2.5-flash";
    generateContent(modelId);
  }

  // Shows how to generate content with safety settings.
  public static GenerateContentResponse generateContent(String modelId) {
    // Client initialization. Once created, it can be reused for multiple requests.
    try (Client client =
        Client.builder()
            .location("global")
            .vertexAI(true)
            .httpOptions(HttpOptions.builder().apiVersion("v1").build())
            .build()) {

      String systemInstruction = "Be as mean as possible.";
      String prompt =
          "Write a list of 5 disrespectful things that I might say"
              + " to the universe after stubbing my toe in the dark.";

      // Set safety settings.
      List<HarmCategory.Known> categoriesToBlock =
          List.of(
              HarmCategory.Known.HARM_CATEGORY_DANGEROUS_CONTENT,
              HarmCategory.Known.HARM_CATEGORY_HARASSMENT,
              HarmCategory.Known.HARM_CATEGORY_HATE_SPEECH,
              HarmCategory.Known.HARM_CATEGORY_SEXUALLY_EXPLICIT);

      List<SafetySetting> safetySettings =
          categoriesToBlock.stream()
              .map(
                  category ->
                      SafetySetting.builder()
                          .category(category)
                          .threshold(HarmBlockThreshold.Known.BLOCK_LOW_AND_ABOVE)
                          .build())
              .collect(Collectors.toList());

      GenerateContentResponse response =
          client.models.generateContent(
              modelId,
              prompt,
              GenerateContentConfig.builder()
                  .systemInstruction(Content.fromParts(Part.fromText(systemInstruction)))
                  .safetySettings(safetySettings)
                  .build());

      // Get the response candidate.
      Candidate candidate =
          response
              .candidates()
              .flatMap(candidates -> candidates.stream().findFirst())
              .orElseThrow(
                  () -> new IllegalStateException("No response candidate generated by the model."));

      // Finish Reason will be `SAFETY` if it is blocked.
      System.out.println(candidate.finishReason());
      // Example response:
      // Optional[SAFETY]

      // For details, print all the safety-rating fields in the response.
      candidate
          .safetyRatings()
          .ifPresent(
              safetyRatings ->
                  safetyRatings.forEach(
                      safetyRating -> {
                        System.out.println("\nCategory: " + safetyRating.category());
                        System.out.println("Is Blocked: " + safetyRating.blocked());
                        System.out.println("Probability: " + safetyRating.probability());
                        System.out.println("Probability Score: " + safetyRating.probabilityScore());
                        System.out.println("Severity: " + safetyRating.severity());
                        System.out.println("Severity Score: " + safetyRating.severityScore());
                      }));
      // Example response:
      // Category: Optional[HARM_CATEGORY_HATE_SPEECH]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[1.9967922E-5]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.05732864]
      //
      // Category: Optional[HARM_CATEGORY_DANGEROUS_CONTENT]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[2.9124324E-6]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.04544826]
      //
      // Category: Optional[HARM_CATEGORY_HARASSMENT]
      // Is Blocked: Optional[true]
      // Probability: Optional[MEDIUM]
      // Probability Score: Optional[0.4593908]
      // Severity: Optional[HARM_SEVERITY_MEDIUM]
      // Severity Score: Optional[0.22082388]
      //
      // Category: Optional[HARM_CATEGORY_SEXUALLY_EXPLICIT]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[6.453211E-8]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.023201048]

      return response;
    }
  }
}
 

Python

Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

from google import genai
from google.genai.types import (
    GenerateContentConfig,
    HarmCategory,
    HarmBlockThreshold,
    HttpOptions,
    SafetySetting,
)

client = genai.Client(http_options=HttpOptions(api_version="v1"))

system_instruction = "Be as mean as possible."

prompt = """
Write a list of 5 disrespectful things that I might say to the universe after stubbing my toe in the dark.
"""

safety_settings = [
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_HATE_SPEECH,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
]

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=prompt,
    config=GenerateContentConfig(
        system_instruction=system_instruction,
        safety_settings=safety_settings,
    ),
)

# Response will be `None` if it is blocked.
print(response.text)
# Example response:
#     None

# Finish Reason will be `SAFETY` if it is blocked.
print(response.candidates[0].finish_reason)
# Example response:
#     FinishReason.SAFETY

# For details, print all the safety-rating fields in the response.
for each in response.candidates[0].safety_ratings:
    print('\nCategory: ', str(each.category))
    print('Is Blocked:', True if each.blocked else False)
    print('Probability: ', each.probability)
    print('Probability Score: ', each.probability_score)
    print('Severity:', each.severity)
    print('Severity Score:', each.severity_score)
# Example response:
#
#     Category:  HarmCategory.HARM_CATEGORY_HATE_SPEECH
#     Is Blocked: False
#     Probability:  HarmProbability.NEGLIGIBLE
#     Probability Score:  2.547714e-05
#     Severity: HarmSeverity.HARM_SEVERITY_NEGLIGIBLE
#     Severity Score: None
#
#     Category:  HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT
#     Is Blocked: False
#     Probability:  HarmProbability.NEGLIGIBLE
#     Probability Score:  3.6103818e-06
#     Severity: HarmSeverity.HARM_SEVERITY_NEGLIGIBLE
#     Severity Score: None
#
#     Category:  HarmCategory.HARM_CATEGORY_HARASSMENT
#     Is Blocked: True
#     Probability:  HarmProbability.MEDIUM
#     Probability Score:  0.71599233
#     Severity: HarmSeverity.HARM_SEVERITY_MEDIUM
#     Severity Score: 0.30782545
#
#     Category:  HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
#     Is Blocked: False
#     Probability:  HarmProbability.NEGLIGIBLE
#     Probability Score:  1.5624657e-05
#     Severity: HarmSeverity.HARM_SEVERITY_NEGLIGIBLE
#     Severity Score: None
 

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.
