Test a text prompt to generate ideas using a publisher text model.
Code sample
Java
Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
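If you want to verify locally that Application Default Credentials can be resolved before running the sample, a minimal sketch such as the following can help. It assumes the com.google.auth:google-auth-library-oauth2-http dependency, which the Vertex AI Java client library pulls in transitively; the class name CheckAdcSample is only an illustrative placeholder.

import com.google.auth.oauth2.GoogleCredentials;
import java.io.IOException;

public class CheckAdcSample {
  public static void main(String[] args) throws IOException {
    // Resolves credentials using the standard ADC search order: the
    // GOOGLE_APPLICATION_CREDENTIALS environment variable, gcloud user credentials,
    // or the attached service account on Google Cloud compute environments.
    GoogleCredentials credentials = GoogleCredentials.getApplicationDefault();
    System.out.println("Application Default Credentials resolved: "
        + credentials.getClass().getSimpleName());
  }
}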
import com.google.cloud.aiplatform.v1.EndpointName;
import com.google.cloud.aiplatform.v1.PredictResponse;
import com.google.cloud.aiplatform.v1.PredictionServiceClient;
import com.google.cloud.aiplatform.v1.PredictionServiceSettings;
import com.google.protobuf.Value;
import com.google.protobuf.util.JsonFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class PredictTextPromptSample {

  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    // Details of designing text prompts for supported large language models:
    // https://cloud.google.com/vertex-ai/docs/generative-ai/text/text-overview
    String instance =
        "{ \"prompt\": "
            + "\"Give me ten interview questions for the role of program manager.\"}";
    String parameters =
        "{\n"
            + " \"temperature\": 0.2,\n"
            + " \"maxOutputTokens\": 256,\n"
            + " \"topP\": 0.95,\n"
            + " \"topK\": 40\n"
            + "}";
    String project = "YOUR_PROJECT_ID";
    String location = "us-central1";
    String publisher = "google";
    String model = "text-bison@001";

    predictTextPrompt(instance, parameters, project, location, publisher, model);
  }

  // Get a text prompt from a supported text model
  public static void predictTextPrompt(
      String instance,
      String parameters,
      String project,
      String location,
      String publisher,
      String model)
      throws IOException {
    String endpoint = String.format("%s-aiplatform.googleapis.com:443", location);
    PredictionServiceSettings predictionServiceSettings =
        PredictionServiceSettings.newBuilder().setEndpoint(endpoint).build();

    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests.
    try (PredictionServiceClient predictionServiceClient =
        PredictionServiceClient.create(predictionServiceSettings)) {
      final EndpointName endpointName =
          EndpointName.ofProjectLocationPublisherModelName(project, location, publisher, model);

      // Use Value.Builder to convert the instance JSON to a dynamically typed value that can be
      // processed by the service.
      Value.Builder instanceValue = Value.newBuilder();
      JsonFormat.parser().merge(instance, instanceValue);
      List<Value> instances = new ArrayList<>();
      instances.add(instanceValue.build());

      // Use Value.Builder to convert the parameters JSON to a dynamically typed value that can be
      // processed by the service.
      Value.Builder parameterValueBuilder = Value.newBuilder();
      JsonFormat.parser().merge(parameters, parameterValueBuilder);
      Value parameterValue = parameterValueBuilder.build();

      PredictResponse predictResponse =
          predictionServiceClient.predict(endpointName, instances, parameterValue);
      System.out.println("Predict Response");
      System.out.println(predictResponse);
    }
  }
}
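The sample above prints the entire PredictResponse. If you only need the generated text, a sketch along these lines can pull it out of each prediction; the "content" field name is an assumption based on the typical shape of text-bison predictions and may differ for other publisher models, and PrintPredictionContent is only an illustrative class name.

import com.google.cloud.aiplatform.v1.PredictResponse;
import com.google.protobuf.Value;

public class PrintPredictionContent {
  // Illustrative helper: prints the generated text from each prediction in a response.
  public static void printContent(PredictResponse predictResponse) {
    for (Value prediction : predictResponse.getPredictionsList()) {
      // Each prediction is a Struct-typed Value; "content" is assumed to hold the generated text.
      Value content =
          prediction.getStructValue().getFieldsOrDefault("content", Value.getDefaultInstance());
      System.out.println(content.getStringValue());
    }
  }
}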
Ruby
Before trying this sample, follow the Ruby setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Ruby API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
require "google/cloud/ai_platform/v1"

##
# Vertex AI Predict Text Prompt
#
# @param project_id [String] Your Google Cloud project (e.g. "my-project")
# @param location_id [String] Your model location (e.g. "us-central1")
# @param publisher [String] The Model Publisher (e.g. "google")
# @param model [String] The Model Identifier (e.g. "text-bison@001")
#
def predict_text_prompt project_id:, location_id:, publisher:, model:
  # Create the Vertex AI client.
  client = ::Google::Cloud::AIPlatform::V1::PredictionService::Client.new do |config|
    config.endpoint = "#{location_id}-aiplatform.googleapis.com"
  end

  # Build the resource name from the project.
  endpoint = client.endpoint_path(
    project: project_id,
    location: location_id,
    publisher: publisher,
    model: model
  )

  prompt = "Give me ten interview questions for the role of program manager."

  # Initialize the request arguments
  instance = Google::Protobuf::Value.new(
    struct_value: Google::Protobuf::Struct.new(
      fields: {
        "prompt" => Google::Protobuf::Value.new(string_value: prompt)
      }
    )
  )
  instances = [instance]

  parameters = Google::Protobuf::Value.new(
    struct_value: Google::Protobuf::Struct.new(
      fields: {
        "temperature" => Google::Protobuf::Value.new(number_value: 0.2),
        "maxOutputTokens" => Google::Protobuf::Value.new(number_value: 256),
        "topP" => Google::Protobuf::Value.new(number_value: 0.95),
        "topK" => Google::Protobuf::Value.new(number_value: 40)
      }
    )
  )

  # Make the prediction request
  response = client.predict endpoint: endpoint, instances: instances, parameters: parameters

  # Handle the prediction response
  puts "Predict Response"
  puts response
end
What's next
To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.