Gets a model using the get_model method.
Code sample
Java
Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
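In a local development environment, Application Default Credentials are typically initialized with the gcloud CLI (shown here as a quick reference; the linked authentication guide covers the details):

gcloud auth application-default login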
import com.google.cloud.aiplatform.v1.DeployedModelRef;
import com.google.cloud.aiplatform.v1.EnvVar;
import com.google.cloud.aiplatform.v1.Model;
import com.google.cloud.aiplatform.v1.Model.ExportFormat;
import com.google.cloud.aiplatform.v1.ModelContainerSpec;
import com.google.cloud.aiplatform.v1.ModelName;
import com.google.cloud.aiplatform.v1.ModelServiceClient;
import com.google.cloud.aiplatform.v1.ModelServiceSettings;
import com.google.cloud.aiplatform.v1.Port;
import com.google.cloud.aiplatform.v1.PredictSchemata;
import java.io.IOException;

public class GetModelSample {

  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String project = "YOUR_PROJECT_ID";
    String modelId = "YOUR_MODEL_ID";
    getModelSample(project, modelId);
  }

  static void getModelSample(String project, String modelId) throws IOException {
    ModelServiceSettings modelServiceSettings =
        ModelServiceSettings.newBuilder()
            .setEndpoint("us-central1-aiplatform.googleapis.com:443")
            .build();

    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests. After completing all of your requests, call
    // the "close" method on the client to safely clean up any remaining background resources.
    try (ModelServiceClient modelServiceClient = ModelServiceClient.create(modelServiceSettings)) {
      String location = "us-central1";
      ModelName modelName = ModelName.of(project, location, modelId);
      Model modelResponse = modelServiceClient.getModel(modelName);

      System.out.println("Get Model response");
      System.out.format("\tName: %s\n", modelResponse.getName());
      System.out.format("\tDisplay Name: %s\n", modelResponse.getDisplayName());
      System.out.format("\tDescription: %s\n", modelResponse.getDescription());
      System.out.format("\tMetadata Schema Uri: %s\n", modelResponse.getMetadataSchemaUri());
      System.out.format("\tMetadata: %s\n", modelResponse.getMetadata());
      System.out.format("\tTraining Pipeline: %s\n", modelResponse.getTrainingPipeline());
      System.out.format("\tArtifact Uri: %s\n", modelResponse.getArtifactUri());
      System.out.format(
          "\tSupported Deployment Resources Types: %s\n",
          modelResponse.getSupportedDeploymentResourcesTypesList());
      System.out.format(
          "\tSupported Input Storage Formats: %s\n",
          modelResponse.getSupportedInputStorageFormatsList());
      System.out.format(
          "\tSupported Output Storage Formats: %s\n",
          modelResponse.getSupportedOutputStorageFormatsList());
      System.out.format("\tCreate Time: %s\n", modelResponse.getCreateTime());
      System.out.format("\tUpdate Time: %s\n", modelResponse.getUpdateTime());
      System.out.format("\tLabels: %s\n", modelResponse.getLabelsMap());

      PredictSchemata predictSchemata = modelResponse.getPredictSchemata();
      System.out.println("\tPredict Schemata");
      System.out.format("\t\tInstance Schema Uri: %s\n", predictSchemata.getInstanceSchemaUri());
      System.out.format(
          "\t\tParameters Schema Uri: %s\n", predictSchemata.getParametersSchemaUri());
      System.out.format(
          "\t\tPrediction Schema Uri: %s\n", predictSchemata.getPredictionSchemaUri());

      for (ExportFormat exportFormat : modelResponse.getSupportedExportFormatsList()) {
        System.out.println("\tSupported Export Format");
        System.out.format("\t\tId: %s\n", exportFormat.getId());
      }

      ModelContainerSpec containerSpec = modelResponse.getContainerSpec();
      System.out.println("\tContainer Spec");
      System.out.format("\t\tImage Uri: %s\n", containerSpec.getImageUri());
      System.out.format("\t\tCommand: %s\n", containerSpec.getCommandList());
      System.out.format("\t\tArgs: %s\n", containerSpec.getArgsList());
      System.out.format("\t\tPredict Route: %s\n", containerSpec.getPredictRoute());
      System.out.format("\t\tHealth Route: %s\n", containerSpec.getHealthRoute());

      for (EnvVar envVar : containerSpec.getEnvList()) {
        System.out.println("\t\tEnv");
        System.out.format("\t\t\tName: %s\n", envVar.getName());
        System.out.format("\t\t\tValue: %s\n", envVar.getValue());
      }

      for (Port port : containerSpec.getPortsList()) {
        System.out.println("\t\tPort");
        System.out.format("\t\t\tContainer Port: %s\n", port.getContainerPort());
      }

      for (DeployedModelRef deployedModelRef : modelResponse.getDeployedModelsList()) {
        System.out.println("\tDeployed Model");
        System.out.format("\t\tEndpoint: %s\n", deployedModelRef.getEndpoint());
        System.out.format("\t\tDeployed Model Id: %s\n", deployedModelRef.getDeployedModelId());
      }
    }
  }
}
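The sample pins both the API endpoint and the location to us-central1, and the two must refer to the same region. As a hedged illustration reusing the imports above (europe-west4 is only an example region, not something the sample requires), the same lookup against another region could look like:

ModelServiceSettings settings =
    ModelServiceSettings.newBuilder()
        .setEndpoint("europe-west4-aiplatform.googleapis.com:443")
        .build();
try (ModelServiceClient client = ModelServiceClient.create(settings)) {
  // The location segment of the resource name must match the endpoint's region.
  Model model = client.getModel(ModelName.of(project, "europe-west4", modelId));
  System.out.format("Display Name: %s%n", model.getDisplayName());
}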
Node.js
Before trying this sample, follow the Node.js setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Node.js API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
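If the client library isn't already installed, it is typically added to the project with npm:

npm install @google-cloud/aiplatform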
/**
 * TODO(developer): Uncomment these variables before running the sample.
 * (Not necessary if passing values as arguments)
 */
// const modelId = 'YOUR_MODEL_ID';
// const project = 'YOUR_PROJECT_ID';
// const location = 'YOUR_PROJECT_LOCATION';

// Imports the Google Cloud Model Service Client library
const {ModelServiceClient} = require('@google-cloud/aiplatform');

// Specifies the location of the api endpoint
const clientOptions = {
  apiEndpoint: 'us-central1-aiplatform.googleapis.com',
};

// Instantiates a client
const modelServiceClient = new ModelServiceClient(clientOptions);

async function getModel() {
  // Configure the model resource name
  const name = `projects/${project}/locations/${location}/models/${modelId}`;
  const request = {
    name,
  };

  // Get and print out the details of the model
  const [response] = await modelServiceClient.getModel(request);

  console.log('Get model response');
  console.log(`\tName : ${response.name}`);
  console.log(`\tDisplayName : ${response.displayName}`);
  console.log(`\tDescription : ${response.description}`);
  console.log(`\tMetadata schema uri : ${response.metadataSchemaUri}`);
  console.log(`\tMetadata : ${JSON.stringify(response.metadata)}`);
  console.log(`\tTraining pipeline : ${response.trainingPipeline}`);
  console.log(`\tArtifact uri : ${response.artifactUri}`);
  console.log(
    `\tSupported deployment resource types : ${response.supportedDeploymentResourceTypes}`
  );
  console.log(
    `\tSupported input storage formats : ${response.supportedInputStorageFormats}`
  );
  console.log(
    `\tSupported output storage formats : ${response.supportedOutputStorageFormats}`
  );
  console.log(`\tCreate time : ${JSON.stringify(response.createTime)}`);
  console.log(`\tUpdate time : ${JSON.stringify(response.updateTime)}`);
  console.log(`\tLabels : ${JSON.stringify(response.labels)}`);

  const predictSchemata = response.predictSchemata;
  console.log('\tPredict schemata');
  console.log(`\tInstance schema uri : ${predictSchemata.instanceSchemaUri}`);
  console.log(`\tParameters schema uri : ${predictSchemata.parametersSchemaUri}`);
  console.log(`\tPrediction schema uri : ${predictSchemata.predictionSchemaUri}`);

  const [supportedExportFormats] = response.supportedExportFormats;
  console.log('\tSupported export formats');
  console.log(`\t${supportedExportFormats}`);

  const containerSpec = response.containerSpec;
  console.log('\tContainer Spec');
  if (!containerSpec) {
    console.log(`\t\t${JSON.stringify(containerSpec)}`);
    console.log('\t\tImage uri : {}');
    console.log('\t\tCommand : {}');
    console.log('\t\tArgs : {}');
    console.log('\t\tPredict route : {}');
    console.log('\t\tHealth route : {}');
    console.log('\t\tEnv');
    console.log('\t\t\t{}');
    console.log('\t\tPort');
    console.log('\t\t{}');
  } else {
    console.log(`\t\t${JSON.stringify(containerSpec)}`);
    console.log(`\t\tImage uri : ${containerSpec.imageUri}`);
    console.log(`\t\tCommand : ${containerSpec.command}`);
    console.log(`\t\tArgs : ${containerSpec.args}`);
    console.log(`\t\tPredict route : ${containerSpec.predictRoute}`);
    console.log(`\t\tHealth route : ${containerSpec.healthRoute}`);

    const env = containerSpec.env;
    console.log('\t\tEnv');
    console.log(`\t\t\t${JSON.stringify(env)}`);

    const ports = containerSpec.ports;
    console.log('\t\tPort');
    console.log(`\t\t\t${JSON.stringify(ports)}`);
  }

  const [deployedModels] = response.deployedModels;
  console.log('\tDeployed models');
  console.log('\t\t', deployedModels);
}
getModel();
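The TODO comment notes that the values can be passed as arguments instead of being uncommented in the file. A hypothetical sketch of one way to read them from the command line (the variable names match what the sample expects; the filename is a placeholder):

// Hypothetical alternative to hard-coding the constants above, invoked as:
//   node getModel.js YOUR_PROJECT_ID YOUR_PROJECT_LOCATION YOUR_MODEL_ID
const [project, location, modelId] = process.argv.slice(2);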
Python
Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
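If the client library isn't already installed, it is typically added with pip:

pip install google-cloud-aiplatform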
from google.cloud import aiplatform


def get_model_sample(
    project: str,
    model_id: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.ModelServiceClient(client_options=client_options)
    name = client.model_path(project=project, location=location, model=model_id)
    response = client.get_model(name=name)
    print("response:", response)
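A minimal invocation sketch; the IDs are placeholders to replace with your own values, and the location and endpoint defaults can be overridden together for other regions:

get_model_sample(project="YOUR_PROJECT_ID", model_id="YOUR_MODEL_ID")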
What's next
To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.