# The top-level directory that will be used to store each deployment directory
baseDir: "."

# Each deployment directory will be prefixed with the following string
deploymentDirPrefix: "deployment_"

# The name of the deployment config file to create inside the deployment directory.
# Note: If working with the PPS, DO NOT change this name; the PPS expects this filename.
deploymentInfoFile: "config.yml"

# If defined, this string will be prefixed to the predictions URL for this deployment,
# and the URL will be returned with the deployment id and the /predict endpoint appended.
deploymentPredictionBaseUrl: "http://localhost:8080"

# If defined, create a yaml file with the key-value pairs of the deployment.
# If the name of the file is the same as the deploymentInfoFile,
# the key values are added to the same file as the other config.
# deploymentKVFile: "kv.yaml"
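
# Illustrative sketch (not part of the config): with the settings above, a deployment whose
# id is "abc123" (a hypothetical id) would be laid out and exposed roughly as follows:
#
#   ./deployment_abc123/config.yml           <- deployment directory and deploymentInfoFile
#   http://localhost:8080/abc123/predict     <- predictions URL returned for the deployment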
# Docker network on which to run all containers.
# This network must be created prior to running
# the agent (i.e., `docker network create <NAME>`)
dockerNetwork: "bosun"

# Traefik image to use
traefikImage: "traefik:2.3.3"

# Address that will be reported to DataRobot
outfacingPredictionURLPrefix: "http://10.10.12.22:81"

# MLOps Agent image to use for monitoring
agentImage: "datarobot/mlops-tracking-agent:latest"

# RabbitMQ image to use for building a channel
rabbitmqImage: "rabbitmq:3-management"

# PPS base image
ppsBaseImage: "datarobot/datarobot-portable-prediction-api:latest"

# Prefix for generated images
generatedImagePrefix: "mlops_"

# Prefix for running containers
containerNamePrefix: "mlops_"

# Mapping of Traefik proxy ports (not mandatory)
traefikPortMapping:
  80: 81
  8080: 8081

# Mapping of RabbitMQ ports (not mandatory)
rabbitmqPortMapping:
  15672: 15673
  5672: 5673
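
# Illustrative sketch (not part of the config): the Docker network named above must exist
# before the agent is started, e.g.:
#
#   docker network create bosun
#
# The port mappings above presumably expose Traefik's ports 80/8080 on host ports 81/8081
# (and RabbitMQ's 15672/5672 on 15673/5673), which would explain why
# outfacingPredictionURLPrefix points at port 81. This reading is an inference from the
# values in this file, not something stated here.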
##
# The following settings are related to connecting to your Kubernetes cluster
##

# The name of the kube-config context to use (similar to the --context argument of kubectl). There is a special
# `IN_CLUSTER` string to be used if you are running the plugin inside a cluster. The default is "IN_CLUSTER".
# kubeConfigContext: IN_CLUSTER

# The namespace in which you want to create and manage external deployments (similar to the --namespace argument
# of kubectl). You can leave this as `null` to use the "default" namespace, the namespace defined in your context,
# or (if running `IN_CLUSTER`) to manage resources in the same namespace the plugin is executing in.
# kubeNamespace:

##
# The following settings are related to whether or not MLOps monitoring is enabled
##

# We need to know the location of the dockerized agent image that can be launched into your Kubernetes cluster.
# You can build the image by running `make build` in the tools/agent_docker/ directory, retagging the image,
# and pushing it to your registry.
# agentImage: "<FILL-IN-DOCKER-REGISTRY>/mlops-tracking-agent:latest"

##
# The following settings are all related to accessing the model from outside the Kubernetes cluster
##

# The URL prefix used to access the deployed model, e.g., https://example.com/deployments/
# The model will be accessible via <outfacingPredictionURLPrefix>/<model_id>/predict
outfacingPredictionURLPrefix: "<FILL-CORRECT-URL-FOR-K8S-INGRESS>"

# We are still using the beta Ingress resource API, so a class must be provided. If your cluster
# doesn't have a default ingress class, please provide one.
# ingressClass:

##
# The following settings are all related to building the finalized model image (base image + mlpkg)
##

# The location of the Portable Prediction Server base image. You can download it from DataRobot's developer
# tools section, retag it, and push it to your registry.
ppsBaseImage: "<FILL-IN-DOCKER-REGISTRY>/datarobot-portable-prediction-api:latest"

# The Docker repo to which this plugin can push finalized models. The built images will be tagged
# as follows: <generatedImageRepo>:m-<model_pkg_id>
generatedImageRepo: "<FILL-IN-DOCKER-REGISTRY>/mlops-model"

# We use Kaniko to build our finalized image. See https://github.com/GoogleContainerTools/kaniko#readme.
# The default is to use the image below.
# kanikoImage: "gcr.io/kaniko-project/executor:v1.5.2"

# The name of the Kaniko ConfigMap to use. This provides the settings Kaniko will need to be able to push to
# your registry type. See https://github.com/GoogleContainerTools/kaniko#pushing-to-different-registries.
# The default is to not use any additional configuration.
# kanikoConfigmapName: "docker-config"

# The name of the Kaniko Secret to use. This provides the settings Kaniko will need to be able to push to
# your registry type. See https://github.com/GoogleContainerTools/kaniko#pushing-to-different-registries.
# The default is to not use any additional secrets. The secret must be of the type kubernetes.io/dockerconfigjson.
# kanikoSecretName: "registry-credentials"

# The name of a service account to use for running Kaniko if you want to run it in a more secure fashion.
# See https://github.com/GoogleContainerTools/kaniko#security.
# The default is to use the "default" service account in the namespace in which the pod runs.
# kanikoServiceAccount: default
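
# Illustrative sketch (not part of the config): with outfacingPredictionURLPrefix set to
# "https://example.com/deployments" and generatedImageRepo set to
# "registry.example.com/mlops-model" (hypothetical values), a deployed model would be
# reachable at
#
#   https://example.com/deployments/<model_id>/predict
#
# and its finalized image would be pushed as
#
#   registry.example.com/mlops-model:m-<model_pkg_id>
#
# If Kaniko must authenticate to that registry, a secret of the required type can be created
# with a command along these lines (adjust the registry, user, and credentials):
#
#   kubectl create secret docker-registry registry-credentials \
#     --docker-server=registry.example.com --docker-username=<USER> --docker-password=<PASSWORD>
#
# This produces a secret of type kubernetes.io/dockerconfigjson, matching the kanikoSecretName
# example above.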