import datarobot as dr

# Connect to DataRobot (reads credentials from drconfig.yaml / environment)
dr.Client()

# Set to the location of your auto-mpg.csv and auto-mpg-test.csv data files
# Example: dataset_file_path = '/Users/myuser/Downloads/auto-mpg.csv'
training_dataset_file_path = ''
test_dataset_file_path = ''

# Load dataset
training_dataset = dr.Dataset.create_from_file(training_dataset_file_path)

# Create a new project based on dataset
project = dr.Project.create_from_dataset(
    training_dataset.id, project_name='Auto MPG DR-Client'
)
# Set to the location of your auto-mpg.csv and auto-mpg-test.csv data files
# Example: dataset_file_path = '/Users/myuser/Downloads/auto-mpg.csv'
training_dataset_file_path <- ""
test_dataset_file_path <- ""

# Load both datasets and preview the training data
training_dataset <- utils::read.csv(training_dataset_file_path)
test_dataset <- utils::read.csv(test_dataset_file_path)
head(training_dataset)

# Create a new DataRobot project from the training data
# (maxWait allows up to an hour for the upload/registration to finish)
project <- SetupProject(
  dataSource = training_dataset,
  projectName = "Auto MPG DR-Client",
  maxWait = 60 * 60
)
DATAROBOT_API_TOKEN=${DATAROBOT_API_TOKEN}
DATAROBOT_ENDPOINT=${DATAROBOT_ENDPOINT}

# Upload the dataset and create a project; the async job's status URL is
# returned in the Location response header.
location=$(curl -Lsi \
  -X POST \
  -H "Authorization: Bearer ${DATAROBOT_API_TOKEN}" \
  -F 'projectName="Auto MPG"' \
  -F "file=@${DATASET_FILE_PATH}" \
  "${DATAROBOT_ENDPOINT}"/projects/ | grep -i 'Location: .*$' | \
  cut -d " " -f2 | tr -d '\r')

echo "Uploaded dataset. Checking status of project at: ${location}"

# Poll the status URL until the response body contains a project id.
while true; do
  project_id=$(curl -Ls \
    -X GET \
    -H "Authorization: Bearer ${DATAROBOT_API_TOKEN}" "${location}" \
    | grep -Eo 'id":\s"\w+' | cut -d '"' -f3 | tr -d '\r')
  if [ "${project_id}" = "" ]
  then
    echo "Setting up project..."
    sleep 10
  else
    echo "Project setup complete."
    echo "Project ID: ${project_id}"
    break
  fi
done
# Use training data to build models
from datarobot import AUTOPILOT_MODE

# Set the project's target and initiate Autopilot
# (runs in Quick mode unless a different mode is specified)
project.analyze_and_model(target='mpg', worker_count=-1, mode=AUTOPILOT_MODE.QUICK)

# Open the project's Leaderboard to monitor the progress in UI.
project.open_in_browser()

# Wait for the model creation to finish
project.wait_for_autopilot()

model = project.get_top_model()
# Set the project target and initiate Autopilot
SetTarget(project, target = "mpg")

# Block execution until Autopilot is complete
WaitForAutopilot(project)

# Retrieve the model DataRobot recommends for deployment
model <- GetRecommendedModel(
  project,
  type = RecommendedModelType$RecommendedForDeployment
)
# Set the target and start Autopilot in quick mode; the training-status URL
# is returned in the Location response header.
# NOTE(review): the original piped through `cut -d " "` with no -f field list,
# which makes cut fail; -f2 restored to match the project-creation snippet.
response=$(curl -Lsi \
  -X PATCH \
  -H "Authorization: Bearer ${DATAROBOT_API_TOKEN}" \
  -H "Content-Type: application/json" \
  --data '{"target": "mpg", "mode": "quick"}' \
  "${DATAROBOT_ENDPOINT}/projects/${project_id}/aim" | grep -i 'location: .*$' \
  | cut -d " " -f2 | tr -d '\r')

echo "AI training initiated. Checking status of training at: ${response}"

# Poll until the project reports a stage, i.e. target selection has finished.
while true; do
  initial_project_status=$(curl -Ls \
    -X GET \
    -H "Authorization: Bearer ${DATAROBOT_API_TOKEN}" "${response}" \
    | grep -Eo 'stage":\s"\w+' | cut -d '"' -f3 | tr -d '\r')
  if [ "${initial_project_status}" = "" ]
  then
    echo "Setting up AI training..."
    sleep 10
  else
    echo "Training AI."
    echo "Grab a coffee or catch up on email."
    break
  fi
done

# Poll until Autopilot reports completion.
# NOTE(review): the original had a break/done here with no enclosing loop;
# the `while true; do` has been restored so the status is actually polled.
while true; do
  project_status=$(curl -Lsi \
    -X GET \
    -H "Authorization: Bearer ${DATAROBOT_API_TOKEN}" \
    "${DATAROBOT_ENDPOINT}/projects/${project_id}/status" \
    | grep -Eo 'autopilotDone":\strue')
  if [ "${project_status}" = "" ]
  then
    echo "Autopilot training in progress..."
    sleep 60
  else
    echo "Autopilot training complete. Model ready to deploy."
    break
  fi
done
# Test predictions on new data

# Upload the held-out test data to the project
prediction_data = project.upload_dataset(test_dataset_file_path)

# Request predictions against the uploaded dataset's ID
predict_job = model.request_predictions(prediction_data.id)

# Block until the prediction job finishes, then preview the results
predictions = predict_job.get_result_when_complete()
predictions.head()
``` python
# In order to deploy the model, retrieve the prediction server ID before proceeding.
# NOTE(review): the original called dr.Deployment.list(), which returns existing
# deployments rather than prediction servers; dr.PredictionServer.list() matches
# what this comment describes. [0] uses the first available server — [1] assumed
# a second server exists.
prediction_servers = dr.PredictionServer.list()
deployment = dr.Deployment.create_from_learning_model(
    model_id=model.id, label="MPG Prediction Server",
    description="Deployed with DataRobot client",
    default_prediction_server_id=prediction_servers[0].id
)
# View deployment stats
service_stats = deployment.get_service_stats()
print(service_stats.metrics)
```
import requests
from pprint import pprint
import json
import os

# JSON records for example autos for which to predict mpg
autos = [
    {
        "cylinders": 4,
        "displacement": 119.0,
        "horsepower": 82.00,
        "weight": 2720.0,
        "acceleration": 19.4,
        "model year": 82,
        "origin": 1,
    },
    {
        "cylinders": 8,
        "displacement": 120.0,
        "horsepower": 79.00,
        "weight": 2625.0,
        "acceleration": 18.6,
        "model year": 82,
        "origin": 1,
    },
]

# Fetch DATAROBOT_API_TOKEN from the environment.
# NOTE(review): the original read config.yaml via yaml.safe_load without ever
# importing yaml (NameError); reading the environment needs no extra dependency
# and mirrors the R example's Sys.getenv usage.
api_token = os.environ.get("DATAROBOT_API_TOKEN")

# Create REST request for prediction API
prediction_server = deployment.default_prediction_server
prediction_headers = {
    "Authorization": "Bearer {}".format(api_token),
    "Content-Type": "application/json",
    # The datarobot-key comes from the deployment's own default prediction
    # server. NOTE(review): the original referenced prediction_servers[1],
    # a variable defined only in a different snippet.
    "datarobot-key": prediction_server["datarobot-key"],
}

predictions = requests.post(
    f"{prediction_server['url']}/predApi/v1.0/deployments"
    f"/{deployment.id}/predictions",
    headers=prediction_headers,
    data=json.dumps(autos),
)
pprint(predictions.json())
# Prepare to connect to the prediction server
URL <- paste0(
  deployment$defaultPredictionServer$url,
  "/predApi/v1.0/deployments/",
  deployment$id,
  "/predictions"
)
# NOTE(review): this is a literal placeholder string — replace it with your
# DataRobot email account (e.g. the value of deployment$owners$preview$email).
USERNAME <- "deployment$owners$preview$email"
# This is configured implicitly when you first run `library(datarobot)`
API_TOKEN <- Sys.getenv("DATAROBOT_API_TOKEN")

# Invoke Predictions API with the test_dataset
response <- httr::POST(
  URL,
  body = jsonlite::toJSON(test_dataset),
  httr::add_headers("datarobot-key" = deployment$defaultPredictionServer$dataRobotKey),
  httr::content_type_json(),
  httr::authenticate(USERNAME, API_TOKEN, "basic")
)

# Parse the results from the prediction server
predictionResults <- jsonlite::fromJSON(
  httr::content(response, as = "text"),
  simplifyDataFrame = TRUE,
  flatten = TRUE
)$data
print(predictionResults)
After getting started with DataRobot's APIs, navigate to the user guide for overviews, Jupyter notebooks, and task-based tutorials that help you find complete examples of common data science and machine learning workflows. Browse AI accelerators to try out repeatable, code-first workflows and modular building blocks. You can also read the reference documentation available for the REST API and Python API client.