diff --git a/custom-words.txt b/custom-words.txt index 65d176d2fa4d..a65aa4e4846f 100644 --- a/custom-words.txt +++ b/custom-words.txt @@ -2185,3 +2185,4 @@ PPSX PPTM XLSM DOCM +multislot \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/Personalizer.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/Personalizer.json new file mode 100644 index 000000000000..ecdd170d1424 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/Personalizer.json @@ -0,0 +1,1841 @@ +{ + "swagger": "2.0", + "info": { + "title": "Personalizer Client v1.1-preview.1", + "description": "Personalizer Service is an Azure Cognitive Service that makes it easy to target content and experiences without complex pre-analysis or cleanup of past data. Given a context and featurized content, the Personalizer Service returns which content item to show to users in rewardActionId. 
As rewards are sent in response to the use of rewardActionId, the reinforcement learning algorithm will improve the model and improve performance of future rank calls.", + "version": "v1.1-preview.1" + }, + "host": "localhost:5001", + "basePath": "/personalizer/v1.1-preview.1", + "schemes": [ + "https" + ], + "paths": { + "/configurations/service": { + "put": { + "tags": [ + "ConfigurationsV1Dot1Preview1" + ], + "summary": "Update Service Configuration.", + "description": "Update the Personalizer service configuration.", + "operationId": "ServiceConfiguration_Update", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "config", + "description": "The personalizer service configuration.", + "required": true, + "schema": { + "$ref": "#/definitions/ServiceConfiguration" + } + } + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/ServiceConfiguration" + } + }, + "default": { + "description": "Updating defaultReward, rewardWaitTime and rewardAggregation when changing learning mode from Online to Apprentice mode and vice versa is not allowed. 
Make the mode change and then change the additional settings with an additional API call.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful ServiceConfiguration_Update request": { + "$ref": "./examples/ServiceConfiguration_Update.json" + } + } + }, + "get": { + "tags": [ + "ConfigurationsV1Dot1Preview1" + ], + "summary": "Get Service Configuration.", + "description": "Get the Personalizer service configuration.", + "operationId": "ServiceConfiguration_Get", + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/ServiceConfiguration" + } + } + }, + "x-ms-examples": { + "Successful ServiceConfiguration_Get request": { + "$ref": "./examples/ServiceConfiguration_Get.json" + } + } + } + }, + "/configurations/policy": { + "get": { + "tags": [ + "ConfigurationsV1Dot1Preview1" + ], + "summary": "Get Policy.", + "description": "Get the Learning Settings currently used by the Personalizer service.", + "operationId": "Policy_Get", + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/PolicyContract" + } + } + }, + "x-ms-examples": { + "Successful Policy_Get request": { + "$ref": "./examples/Policy_Get.json" + } + } + }, + "put": { + "tags": [ + "ConfigurationsV1Dot1Preview1" + ], + "summary": "Update Policy.", + "description": "Update the Learning Settings that the Personalizer service will use to train models.", + "operationId": "Policy_Update", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "policy", + "description": "The learning settings.", + "required": true, + "schema": { + "$ref": "#/definitions/PolicyContract" + } + } + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/PolicyContract" + } + }, + "default": { + 
"description": "Invalid policy configuration.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Policy_Update request": { + "$ref": "./examples/Policy_Update.json" + } + } + }, + "delete": { + "tags": [ + "ConfigurationsV1Dot1Preview1" + ], + "summary": "Reset Policy.", + "description": "Resets the learning settings of the Personalizer service to default.", + "operationId": "Policy_Reset", + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/PolicyContract" + } + } + }, + "x-ms-examples": { + "Successful Policy_Reset request": { + "$ref": "./examples/Policy_Reset.json" + } + } + } + }, + "/configurations/applyFromEvaluation": { + "post": { + "tags": [ + "ConfigurationsV1Dot1Preview1" + ], + "summary": "Apply Learning Settings and model from a pre-existing Offline Evaluation, making them the current online Learning Settings and model and replacing the previous ones.", + "operationId": "Evaluation_Apply", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/PolicyReferenceContract" + } + } + ], + "responses": { + "204": { + "description": "Success" + }, + "default": { + "description": "Learning Settings not found in evaluation.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Evaluation_Apply request": { + "$ref": "./examples/Evaluation_Apply.json" + } + } + } + }, + "/evaluations/{evaluationId}": { + "delete": { + "tags": [ + "EvaluationsV1Dot1Preview1" + ], + "summary": "Delete Evaluation.", + "description": "Delete the Offline Evaluation associated with the Id.", + "operationId": "Evaluations_Delete", + "parameters": [ + { + "in": "path", + "name": "evaluationId", + "description": "Id of the Offline Evaluation to delete.", + 
"required": true, + "type": "string", + "maxLength": 256 + } + ], + "responses": { + "204": { + "description": "Success" + } + }, + "x-ms-examples": { + "Successful Evaluations_Delete request": { + "$ref": "./examples/Evaluations_Delete.json" + } + } + }, + "get": { + "tags": [ + "EvaluationsV1Dot1Preview1" + ], + "summary": "Get Evaluation.", + "description": "Get the Offline Evaluation associated with the Id.", + "operationId": "Evaluations_Get", + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "path", + "name": "evaluationId", + "description": "Id of the Offline Evaluation.", + "required": true, + "type": "string", + "maxLength": 256 + } + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/Evaluation" + } + }, + "default": { + "description": "Offline Evaluation not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Evaluations_Get request": { + "$ref": "./examples/Evaluations_Get.json" + } + } + } + }, + "/evaluations": { + "get": { + "tags": [ + "EvaluationsV1Dot1Preview1" + ], + "summary": "List Offline Evaluations.", + "description": "List of all Offline Evaluations.", + "operationId": "Evaluations_List", + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Evaluation" + } + } + } + }, + "x-ms-examples": { + "Successful Evaluations_List request": { + "$ref": "./examples/Evaluations_List.json" + } + } + }, + "post": { + "tags": [ + "EvaluationsV1Dot1Preview1" + ], + "summary": "Create Offline Evaluation.", + "description": "Submit a new Offline Evaluation job.", + "operationId": "Evaluations_Create", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "evaluation", + "description": "The Offline Evaluation job definition.", + "required": 
true, + "schema": { + "$ref": "#/definitions/EvaluationContract" + } + } + ], + "responses": { + "201": { + "description": "Success", + "schema": { + "$ref": "#/definitions/Evaluation" + }, + "headers": { + "Location": { + "description": "Location of the Offline Evaluation status and data.", + "type": "string" + } + } + }, + "default": { + "description": "Invalid evaluation contract.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Evaluations_Create request": { + "$ref": "./examples/Evaluations_Create.json" + } + } + } + }, + "/events/{eventId}/reward": { + "post": { + "tags": [ + "Events" + ], + "summary": "Post Reward.", + "description": "Report reward between 0 and 1 that resulted from using the action specified in rewardActionId, for the specified event.", + "operationId": "Events_Reward", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "path", + "name": "eventId", + "description": "The event id this reward applies to.", + "required": true, + "type": "string", + "maxLength": 256 + }, + { + "in": "body", + "name": "reward", + "description": "The reward should be a floating point number, typically between 0 and 1.", + "required": true, + "schema": { + "$ref": "#/definitions/RewardRequest" + } + } + ], + "responses": { + "204": { + "description": "Success" + }, + "default": { + "description": "Invalid reward request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Events_Reward request": { + "$ref": "./examples/Events_Reward.json" + } + } + } + }, + "/events/{eventId}/activate": { + "post": { + "tags": [ + "Events" + ], + "summary": "Activate Event.", + "description": "Report that the specified event was actually used (e.g. 
by being displayed to the user) and a reward should be expected for it.", + "operationId": "Events_Activate", + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "path", + "name": "eventId", + "description": "The event ID to be activated.", + "required": true, + "type": "string", + "maxLength": 256 + } + ], + "responses": { + "204": { + "description": "Success" + }, + "default": { + "description": "Invalid activate event request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Events_Activate request": { + "$ref": "./examples/Events_Activate.json" + } + } + } + }, + "/logs": { + "delete": { + "tags": [ + "Logs" + ], + "summary": "Deletes Logs.", + "description": "Delete all logs of Rank and Reward calls stored by Personalizer.", + "operationId": "Log_Delete", + "responses": { + "204": { + "description": "Success" + } + }, + "x-ms-examples": { + "Successful Log_Delete request": { + "$ref": "./examples/Log_Delete.json" + } + } + } + }, + "/logs/properties": { + "get": { + "tags": [ + "Logs" + ], + "summary": "Get Log Properties.", + "description": "Get properties of the Personalizer logs.", + "operationId": "Log_GetProperties", + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/LogsProperties" + } + }, + "default": { + "description": "Log properties not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Log_GetProperties request": { + "$ref": "./examples/Log_GetProperties.json" + } + } + } + }, + "/model": { + "get": { + "tags": [ + "Model" + ], + "summary": "Get Model.", + "description": "Get the model file generated by Personalizer service.", + "operationId": "Model_Get", + "produces": [ + "application/octet-stream" + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "type": "file" + } + } + }, + "x-ms-examples": { + 
"Successful Model_Get request": { + "$ref": "./examples/Model_Get.json" + } + } + }, + "delete": { + "tags": [ + "Model" + ], + "summary": "Reset Model.", + "description": "Resets the model file generated by Personalizer service.", + "operationId": "Model_Reset", + "produces": [ + "application/json" + ], + "responses": { + "204": { + "description": "Success" + }, + "default": { + "description": "Model reset failed.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Model_Reset request": { + "$ref": "./examples/Model_Reset.json" + } + } + } + }, + "/model/properties": { + "get": { + "tags": [ + "Model" + ], + "summary": "Get Model Properties.", + "description": "Get properties of the model file generated by Personalizer service.", + "operationId": "Model_GetProperties", + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Success", + "schema": { + "$ref": "#/definitions/ModelProperties" + } + } + }, + "x-ms-examples": { + "Successful Model_GetProperties request": { + "$ref": "./examples/Model_GetProperties.json" + } + } + } + }, + "/multislot/events/{eventId}/reward": { + "post": { + "tags": [ + "MultiSlotEvents" + ], + "summary": "Post multi-slot Rewards.", + "description": "Report reward that resulted from using the action specified in rewardActionId for the slot.", + "operationId": "MultiSlotEvents_Reward", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "path", + "name": "eventId", + "description": "The event id this reward applies to.", + "required": true, + "type": "string", + "maxLength": 256 + }, + { + "in": "body", + "name": "body", + "description": "List of slot id and reward values. 
The reward should be a floating point number, typically between 0 and 1.", + "required": true, + "schema": { + "$ref": "#/definitions/MultiSlotRewardRequest" + } + } + ], + "responses": { + "204": { + "description": "Success" + }, + "default": { + "description": "Invalid reward request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful MultiSlotEvents_Reward request": { + "$ref": "./examples/MultiSlotEvents_Reward.json" + } + } + } + }, + "/multislot/events/{eventId}/activate": { + "post": { + "tags": [ + "MultiSlotEvents" + ], + "summary": "Activate multi-slot Event.", + "description": "Report that the specified event was actually used or displayed to the user and a reward should be expected for it.", + "operationId": "MultiSlotEvents_Activate", + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "path", + "name": "eventId", + "description": "The event ID this activation applies to.", + "required": true, + "type": "string", + "maxLength": 256 + } + ], + "responses": { + "204": { + "description": "Success" + }, + "default": { + "description": "Invalid activate event request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful MultiSlotEvents_Activate request": { + "$ref": "./examples/MultiSlotEvents_Activate.json" + } + } + } + }, + "/multislot/rank": { + "post": { + "tags": [ + "MultiSlotRank" + ], + "summary": "Post multi-slot Rank.", + "description": "Submit a Personalizer multi-slot rank request. Receives a context, a list of actions, and a list of slots. 
Returns which of the provided actions should be used in each slot, in each rewardActionId.", + "operationId": "MultiSlot_Rank", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "A Personalizer multi-slot Rank request.", + "required": true, + "schema": { + "$ref": "#/definitions/MultiSlotRankRequest" + } + } + ], + "responses": { + "201": { + "description": "Success", + "schema": { + "$ref": "#/definitions/MultiSlotRankResponse" + } + }, + "default": { + "description": "Invalid request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful MultiSlot_Rank request": { + "$ref": "./examples/MultiSlot_Rank.json" + } + } + } + }, + "/rank": { + "post": { + "tags": [ + "Rank" + ], + "summary": "Post Rank.", + "description": "Submit a Personalizer rank request. Receives a context and a list of actions. Returns which of the provided actions should be used by your application, in rewardActionId.", + "operationId": "Rank", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "rankRequest", + "description": "A Personalizer Rank request.", + "required": true, + "schema": { + "$ref": "#/definitions/RankRequest" + } + } + ], + "responses": { + "201": { + "description": "Success", + "schema": { + "$ref": "#/definitions/RankResponse" + } + }, + "default": { + "description": "Invalid request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-ms-examples": { + "Successful Rank request": { + "$ref": "./examples/Rank.json" + } + } + } + } + }, + "definitions": { + "ApprenticeLearningMetrics": { + "required": [ + "numberOfEvents", + "numberOfImitatedEvents", + "sumOfImitatedRewards", + "sumOfRewards" + ], + "type": "object", + "properties": { + "numberOfEvents": { + "format": "int64", + "type": "integer" + }, + "sumOfRewards": { + 
"format": "float", + "type": "number" + }, + "numberOfImitatedEvents": { + "format": "int64", + "type": "integer" + }, + "sumOfImitatedRewards": { + "format": "float", + "type": "number" + } + } + }, + "ApprenticeModeMetrics": { + "required": [ + "lastProcessedEventTime", + "numberOfEvents", + "numberOfImitatedEvents", + "startTime", + "sumOfImitatedRewards", + "sumOfRewards" + ], + "type": "object", + "properties": { + "startTime": { + "format": "date-time", + "type": "string" + }, + "lastProcessedEventTime": { + "format": "date-time", + "type": "string" + }, + "lastBatchMetrics": { + "$ref": "#/definitions/ApprenticeLearningMetrics" + }, + "numberOfEvents": { + "format": "int64", + "type": "integer" + }, + "sumOfRewards": { + "format": "float", + "type": "number" + }, + "numberOfImitatedEvents": { + "format": "int64", + "type": "integer" + }, + "sumOfImitatedRewards": { + "format": "float", + "type": "number" + } + } + }, + "ServiceConfiguration": { + "description": "The configuration of the service.", + "required": [ + "defaultReward", + "explorationPercentage", + "logRetentionDays", + "modelExportFrequency", + "rewardAggregation", + "rewardWaitTime" + ], + "type": "object", + "properties": { + "rewardWaitTime": { + "format": "duration", + "description": "The time span waited until a request is marked with the default reward\r\nand should be between 5 seconds and 2 days.\r\nFor example, PT5M (5 mins). 
For information about the time format,\r\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations", + "type": "string" + }, + "defaultReward": { + "format": "float", + "description": "The reward given if a reward is not received within the specified wait time.", + "maximum": 1, + "minimum": -1, + "type": "number" + }, + "rewardAggregation": { + "description": "The function used to process rewards, if multiple reward scores are received before rewardWaitTime is over.", + "maxLength": 256, + "type": "string" + }, + "explorationPercentage": { + "format": "float", + "description": "The percentage of rank responses that will use exploration.", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "modelExportFrequency": { + "format": "duration", + "description": "Personalizer will start using the most updated trained model for online ranks automatically every specified time period.\r\nFor example, PT5M (5 mins). For information about the time format,\r\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations", + "type": "string" + }, + "logMirrorEnabled": { + "description": "Flag indicates whether log mirroring is enabled.", + "type": "boolean" + }, + "logMirrorSasUri": { + "description": "Azure storage account container SAS URI for log mirroring.", + "type": "string" + }, + "logRetentionDays": { + "format": "int32", + "description": "Number of days historical logs are to be maintained. 
-1 implies the logs will never be deleted.", + "maximum": 2147483647, + "minimum": -1, + "type": "integer" + }, + "lastConfigurationEditDate": { + "format": "date-time", + "description": "Last time model training configuration was updated", + "type": "string" + }, + "learningMode": { + "description": "Learning Modes for Personalizer", + "enum": [ + "Online", + "Apprentice", + "LoggingOnly" + ], + "type": "string", + "x-ms-enum": { + "name": "LearningMode", + "modelAsString": true + } + }, + "latestApprenticeModeMetrics": { + "$ref": "#/definitions/ApprenticeModeMetrics" + }, + "isAutoOptimizationEnabled": { + "description": "Flag indicating whether Personalizer will automatically optimize Learning Settings by running Offline Evaluations periodically.", + "type": "boolean" + }, + "autoOptimizationFrequency": { + "format": "duration", + "description": "Frequency of automatic optimization. Only relevant if IsAutoOptimizationEnabled is true.\r\nFor example, PT5M (5 mins). For information about the time format,\r\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations", + "type": "string" + }, + "autoOptimizationStartDate": { + "format": "date-time", + "description": "Date when the first automatic optimization evaluation must be performed. 
Only relevant if IsAutoOptimizationEnabled is true.", + "type": "string" + } + } + }, + "InternalError": { + "description": "An object containing more specific information than the parent object about the error.", + "type": "object", + "properties": { + "code": { + "description": "Detailed error code.", + "type": "string" + }, + "innererror": { + "$ref": "#/definitions/InternalError" + } + } + }, + "PersonalizerError": { + "description": "The error object.", + "required": [ + "code", + "message" + ], + "type": "object", + "properties": { + "code": { + "description": "Error Codes returned by Personalizer", + "enum": [ + "BadRequest", + "InvalidServiceConfiguration", + "InvalidLearningModeServiceConfiguration", + "InvalidPolicyConfiguration", + "InvalidPolicyContract", + "InvalidEvaluationContract", + "DuplicateCustomPolicyNames", + "NoLogsExistInDateRange", + "LogsSizeExceedAllowedLimit", + "InvalidRewardRequest", + "InvalidEventIdToActivate", + "InvalidRankRequest", + "InvalidExportLogsRequest", + "InvalidContainer", + "InvalidModelMetadata", + "ApprenticeModeNeverTurnedOn", + "MissingAppId", + "InvalidRewardWaitTime", + "InvalidMultiSlotApiAccess", + "ModelFileAccessDenied", + "ProblemTypeIncompatibleWithAutoOptimization", + "ResourceNotFound", + "FrontEndNotFound", + "EvaluationNotFound", + "LearningSettingsNotFound", + "EvaluationModelNotFound", + "LogsPropertiesNotFound", + "ModelRankingError", + "InternalServerError", + "RankNullResponse", + "UpdateConfigurationFailed", + "ModelResetFailed", + "ModelPublishFailed", + "ModelMetadataUpdateFailed", + "OperationNotAllowed" + ], + "type": "string", + "x-ms-enum": { + "name": "PersonalizerErrorCode", + "modelAsString": true, + "values": [ + { + "value": "BadRequest", + "description": "Request could not be understood by the server." + }, + { + "value": "InvalidServiceConfiguration", + "description": "Invalid service configuration." 
+ }, + { + "value": "InvalidLearningModeServiceConfiguration", + "description": "Updating defaultReward, rewardWaitTime and rewardAggregation when changing learning mode from Online to Apprentice mode and vice versa is not allowed. Make the mode change and then change the additional settings with an additional API call." + }, + { + "value": "InvalidPolicyConfiguration", + "description": "Invalid policy configuration." + }, + { + "value": "InvalidPolicyContract", + "description": "Invalid policy contract." + }, + { + "value": "InvalidEvaluationContract", + "description": "Invalid evaluation contract." + }, + { + "value": "DuplicateCustomPolicyNames", + "description": "Custom policy names should be unique." + }, + { + "value": "NoLogsExistInDateRange", + "description": "No logs exist in date range." + }, + { + "value": "LogsSizeExceedAllowedLimit", + "description": "Total size of logs exceed allowed limit." + }, + { + "value": "InvalidRewardRequest", + "description": "Invalid reward request." + }, + { + "value": "InvalidEventIdToActivate", + "description": "Invalid activate event request." + }, + { + "value": "InvalidRankRequest", + "description": "Invalid request." + }, + { + "value": "InvalidExportLogsRequest", + "description": "Invalid request." + }, + { + "value": "InvalidContainer", + "description": "SAS Uri must be the Uri to a container that has write permissions." + }, + { + "value": "InvalidModelMetadata", + "description": "Invalid model metadata." + }, + { + "value": "ApprenticeModeNeverTurnedOn", + "description": "Apprentice mode never turned on." + }, + { + "value": "MissingAppId", + "description": "AppId is missing in the header." + }, + { + "value": "InvalidRewardWaitTime", + "description": "Reward wait time should be between 5 seconds and 2 days" + }, + { + "value": "InvalidMultiSlotApiAccess", + "description": "Multi-slot feature is currently disabled. 
Please follow multi-slot Personalizer documentation to update your loop settings to enable multi-slot functionality." + }, + { + "value": "ModelFileAccessDenied", + "description": "Key vault Key used for customer managed key cannot be accessed." + }, + { + "value": "ProblemTypeIncompatibleWithAutoOptimization", + "description": "Auto-optimization is not compatible with multi-slot personalization." + }, + { + "value": "ResourceNotFound", + "description": "Requested resource does not exist on the server." + }, + { + "value": "FrontEndNotFound", + "description": "Front end not found." + }, + { + "value": "EvaluationNotFound", + "description": "Offline Evaluation not found." + }, + { + "value": "LearningSettingsNotFound", + "description": "Learning Settings not found in evaluation." + }, + { + "value": "EvaluationModelNotFound", + "description": "Model not found in evaluation." + }, + { + "value": "LogsPropertiesNotFound", + "description": "Log properties not found." + }, + { + "value": "ModelRankingError", + "description": "Error while ranking actions using model. Please verify the learning settings are valid." + }, + { + "value": "InternalServerError", + "description": "A generic error has occurred on the server." + }, + { + "value": "RankNullResponse", + "description": "Rank call returned null response." + }, + { + "value": "UpdateConfigurationFailed", + "description": "Failed to update configuration." + }, + { + "value": "ModelResetFailed", + "description": "Model reset failed." + }, + { + "value": "ModelPublishFailed", + "description": "Model publish failed." + }, + { + "value": "ModelMetadataUpdateFailed", + "description": "Model metadata update failed." + }, + { + "value": "OperationNotAllowed", + "description": "This operation is not allowed at this time." 
+ } + ] + } + }, + "message": { + "description": "A message explaining the error reported by the service.", + "type": "string" + }, + "target": { + "description": "Error source element.", + "type": "string" + }, + "details": { + "description": "An array of details about specific errors that led to this reported error.", + "type": "array", + "items": { + "$ref": "#/definitions/PersonalizerError" + } + }, + "innerError": { + "$ref": "#/definitions/InternalError" + } + } + }, + "ErrorResponse": { + "description": "Used to return an error to the client", + "required": [ + "error" + ], + "type": "object", + "properties": { + "error": { + "$ref": "#/definitions/PersonalizerError" + } + } + }, + "PolicyContract": { + "description": "Learning settings specifying how to train the model.", + "required": [ + "arguments", + "name" + ], + "type": "object", + "properties": { + "name": { + "description": "Name of the learning settings.", + "maxLength": 256, + "type": "string" + }, + "arguments": { + "description": "Arguments of the learning settings.", + "maxLength": 1024, + "type": "string" + } + } + }, + "PolicyReferenceContract": { + "description": "Reference to the policy within the evaluation.", + "required": [ + "evaluationId", + "policyName" + ], + "type": "object", + "properties": { + "evaluationId": { + "description": "Evaluation Id of the evaluation.", + "maxLength": 256, + "type": "string" + }, + "policyName": { + "description": "Name of the learning settings.", + "maxLength": 256, + "type": "string" + } + } + }, + "PolicyResultSummary": { + "description": "This class contains the summary of evaluating a policy on a counterfactual evaluation.", + "type": "object", + "properties": { + "timeStamp": { + "format": "date-time", + "description": "Timestamp of the aggregation.", + "type": "string", + "readOnly": true + }, + "ipsEstimatorNumerator": { + "format": "float", + "description": "Numerator for IPS estimator.", + "type": "number", + "readOnly": true + }, + 
"ipsEstimatorDenominator": { + "format": "float", + "description": "Denominator for IPS estimator.", + "type": "number", + "readOnly": true + }, + "snipsEstimatorDenominator": { + "format": "float", + "description": "Denominator for SNIPS estimator.", + "type": "number", + "readOnly": true + }, + "aggregateTimeWindow": { + "format": "duration", + "description": "Time window for aggregation.\r\nFor example, PT5M (5 mins). For information about the time format,\r\nsee http://en.wikipedia.org/wiki/ISO_8601#Durations", + "type": "string", + "readOnly": true + }, + "nonZeroProbability": { + "format": "float", + "description": "Probability of non-zero values for the Policy evaluation.", + "type": "number" + }, + "sumOfSquares": { + "format": "float", + "description": "Sum of Squares for the Policy evaluation results.", + "type": "number", + "readOnly": true + }, + "confidenceInterval": { + "format": "float", + "description": "Gaussian confidence interval for the Policy evaluation.", + "type": "number", + "readOnly": true + }, + "averageReward": { + "format": "float", + "description": "Average reward.", + "type": "number", + "readOnly": true + } + } + }, + "PolicyResult": { + "description": "This class contains the Learning Settings information and the results of the Offline Evaluation using that policy.", + "type": "object", + "properties": { + "name": { + "description": "The name of the Learning Settings.", + "type": "string", + "readOnly": true + }, + "arguments": { + "description": "The arguments of the Learning Settings.", + "type": "string", + "readOnly": true + }, + "policySource": { + "description": "The source of the Learning Settings.", + "enum": [ + "Online", + "Baseline", + "Random", + "Custom", + "OfflineExperimentation" + ], + "type": "string", + "readOnly": true, + "x-ms-enum": { + "name": "PolicySource", + "modelAsString": true + } + }, + "summary": { + "description": "The aggregate results of the Offline Evaluation.", + "type": "array", + "items": { + 
"$ref": "#/definitions/PolicyResultSummary" + }, + "readOnly": true + }, + "totalSummary": { + "allOf": [ + { + "$ref": "#/definitions/PolicyResultSummary" + } + ], + "readOnly": true + } + } + }, + "Evaluation": { + "description": "A counterfactual evaluation.", + "type": "object", + "properties": { + "id": { + "description": "The ID of the evaluation.", + "maxLength": 256, + "type": "string", + "readOnly": true + }, + "name": { + "description": "The name of the evaluation.", + "maxLength": 256, + "type": "string", + "readOnly": true + }, + "startTime": { + "format": "date-time", + "description": "The start time of the evaluation.", + "type": "string", + "readOnly": true + }, + "endTime": { + "format": "date-time", + "description": "The end time of the evaluation.", + "type": "string", + "readOnly": true + }, + "jobId": { + "description": "The ID of the job processing the evaluation.", + "type": "string", + "readOnly": true + }, + "status": { + "description": "The status of the job processing the evaluation.", + "enum": [ + "completed", + "pending", + "failed", + "notSubmitted", + "timeout", + "optimalPolicyApplied", + "onlinePolicyRetained" + ], + "type": "string", + "readOnly": true, + "x-ms-enum": { + "name": "EvaluationJobStatus", + "modelAsString": true + } + }, + "policyResults": { + "description": "The results of the evaluation.", + "type": "array", + "items": { + "$ref": "#/definitions/PolicyResult" + } + }, + "featureImportance": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "evaluationType": { + "enum": [ + "Manual", + "Auto" + ], + "type": "string", + "x-ms-enum": { + "name": "EvaluationType", + "modelAsString": true + } + }, + "optimalPolicy": { + "type": "string" + }, + "creationTime": { + "format": "date-time", + "type": "string" + } + } + }, + "EvaluationContract": { + "description": "A counterfactual evaluation.", + "required": [ + "endTime", + "name", + "policies", + "startTime" + ], + "type": 
"object", + "properties": { + "enableOfflineExperimentation": { + "description": "True if the evaluation should explore for a more optimal learning settings.", + "type": "boolean" + }, + "name": { + "description": "The name of the evaluation.", + "maxLength": 256, + "type": "string" + }, + "startTime": { + "format": "date-time", + "description": "The start time of the evaluation.", + "type": "string" + }, + "endTime": { + "format": "date-time", + "description": "The end time of the evaluation.", + "type": "string" + }, + "policies": { + "description": "Additional learning settings to evaluate.", + "type": "array", + "items": { + "$ref": "#/definitions/PolicyContract" + } + } + } + }, + "RewardRequest": { + "description": "Reward given to a rank response.", + "required": [ + "value" + ], + "type": "object", + "properties": { + "value": { + "format": "float", + "description": "Reward to be assigned to an action. Value is a float calculated by your application, typically between 0 and 1, and must be between -1 and 1.", + "type": "number" + } + } + }, + "DateRange": { + "description": "A date range starting at From and ending at To.", + "type": "object", + "properties": { + "from": { + "format": "date-time", + "description": "Start date for the range.", + "type": "string", + "readOnly": true + }, + "to": { + "format": "date-time", + "description": "End date for the range.", + "type": "string", + "readOnly": true + } + } + }, + "LogsProperties": { + "description": "Properties related to data used to train the model.", + "type": "object", + "properties": { + "dateRange": { + "allOf": [ + { + "$ref": "#/definitions/DateRange" + } + ], + "readOnly": true + } + } + }, + "ModelProperties": { + "description": "Properties related to the trained model.", + "type": "object", + "properties": { + "creationTime": { + "format": "date-time", + "description": "Creation time of the model.", + "type": "string", + "readOnly": true + }, + "lastModifiedTime": { + "format": "date-time", + 
"description": "Last time the model was modified.", + "type": "string", + "readOnly": true + } + } + }, + "SlotReward": { + "required": [ + "slotId", + "value" + ], + "type": "object", + "properties": { + "slotId": { + "description": "Slot id for which we are sending the reward.", + "maxLength": 256, + "minLength": 1, + "type": "string" + }, + "value": { + "format": "float", + "description": "Reward to be assigned to slotId. Value should be between -1 and 1 inclusive.", + "type": "number" + } + } + }, + "MultiSlotRewardRequest": { + "description": "Reward given to a list of slots.", + "required": [ + "reward" + ], + "type": "object", + "properties": { + "reward": { + "type": "array", + "items": { + "$ref": "#/definitions/SlotReward" + } + } + } + }, + "RankableAction": { + "description": "An action with its associated features used for ranking.", + "required": [ + "features", + "id" + ], + "type": "object", + "properties": { + "id": { + "description": "Id of the action.", + "maxLength": 256, + "type": "string" + }, + "features": { + "description": "List of dictionaries containing features.", + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "SlotRequest": { + "description": "A slot with it's associated features and list of excluded actions", + "required": [ + "baselineAction", + "id" + ], + "type": "object", + "properties": { + "id": { + "description": "Slot ID", + "type": "string" + }, + "features": { + "description": "List of dictionaries containing slot features.", + "type": "array", + "items": { + "type": "object" + } + }, + "excludedActions": { + "description": "List of excluded action Ids.", + "type": "array", + "items": { + "type": "string" + } + }, + "baselineAction": { + "description": "The 'baseline action' ID for the slot.\r\nThe BaselineAction is the Id of the Action your application would use in that slot if Personalizer didn't exist.\r\nBaselineAction must be defined for every slot.\r\nBaselineAction should never be part of 
ExcludedActions.\r\nEach slot must have a unique BaselineAction which corresponds to an action from the event's Actions list.", + "type": "string" + } + } + }, + "MultiSlotRankRequest": { + "required": [ + "actions", + "slots" + ], + "type": "object", + "properties": { + "contextFeatures": { + "description": "Features of the context used for Personalizer as a\r\ndictionary of dictionaries. This is determined by your application, and\r\ntypically includes features about the current user, their\r\ndevice, profile information, aggregated data about time and date, etc.\r\nFeatures should not include personally identifiable information (PII),\r\nunique UserIDs, or precise timestamps.", + "type": "array", + "items": { + "type": "object" + } + }, + "actions": { + "description": "The set of actions the Personalizer service can pick from.\r\nThe set should not contain more than 50 actions.\r\nThe order of the actions does not affect the rank result but the order\r\nshould match the sequence your application would have used to display them.\r\nThe first item in the array will be used as Baseline item in Offline Evaluations.", + "type": "array", + "items": { + "$ref": "#/definitions/RankableAction" + } + }, + "slots": { + "description": "The set of slots the Personalizer service should select actions for.\r\nThe set should not contain more than 50 slots.", + "type": "array", + "items": { + "$ref": "#/definitions/SlotRequest" + } + }, + "eventId": { + "description": "Optionally pass an eventId that uniquely identifies this Rank event.\r\nIf null, the service generates a unique eventId. 
The eventId will be used for\r\nassociating this request with its reward, as well as seeding the pseudo-random\r\ngenerator when making a Personalizer call.", + "maxLength": 256, + "type": "string" + }, + "deferActivation": { + "description": "Send false if it is certain the rewardActionId in rank results will be shown to the user, therefore\r\nPersonalizer will expect a Reward call, otherwise it will assign the default\r\nReward to the event. Send true if it is possible the user will not see the action specified in the rank results,\r\n(e.g. because the page is rendering later, or the Rank results may be overridden by code further downstream).\r\nYou must call the Activate Event API if the event output is shown to users, otherwise Rewards will be ignored.", + "default": false, + "type": "boolean" + } + } + }, + "SlotResponse": { + "required": [ + "id" + ], + "type": "object", + "properties": { + "id": { + "description": "Id is the slot ID.", + "maxLength": 256, + "type": "string" + }, + "rewardActionId": { + "description": "RewardActionID is the action ID recommended by Personalizer.", + "maxLength": 256, + "type": "string", + "readOnly": true + } + } + }, + "MultiSlotRankResponse": { + "type": "object", + "properties": { + "slots": { + "description": "Each slot has a corresponding rewardActionID which is the action ID recommended by Personalizer.", + "type": "array", + "items": { + "$ref": "#/definitions/SlotResponse" + }, + "readOnly": true + }, + "eventId": { + "description": "The eventId for the round trip from request to response.", + "maxLength": 256, + "type": "string", + "readOnly": true + } + } + }, + "RankRequest": { + "description": "Request a set of actions to be ranked by the Personalizer service.", + "required": [ + "actions" + ], + "type": "object", + "properties": { + "contextFeatures": { + "description": "Features of the context used for Personalizer as a\r\ndictionary of dictionaries. 
This is determined by your application, and\r\ntypically includes features about the current user, their\r\ndevice, profile information, aggregated data about time and date, etc.\r\nFeatures should not include personally identifiable information (PII),\r\nunique UserIDs, or precise timestamps.", + "type": "array", + "items": { + "type": "object" + } + }, + "actions": { + "description": "The set of actions the Personalizer service can pick from.\r\nThe set should not contain more than 50 actions.\r\nThe order of the actions does not affect the rank result but the order\r\nshould match the sequence your application would have used to display them.\r\nThe first item in the array will be used as Baseline item in Offline Evaluations.", + "type": "array", + "items": { + "$ref": "#/definitions/RankableAction" + } + }, + "excludedActions": { + "description": "The set of action ids to exclude from ranking.\r\nPersonalizer will consider the first non-excluded item in the array as the Baseline action when performing Offline Evaluations.", + "type": "array", + "items": { + "type": "string" + } + }, + "eventId": { + "description": "Optionally pass an eventId that uniquely identifies this Rank event.\r\nIf null, the service generates a unique eventId. The eventId will be used for\r\nassociating this request with its reward, as well as seeding the pseudo-random\r\ngenerator when making a Personalizer call.", + "maxLength": 256, + "type": "string" + }, + "deferActivation": { + "description": "Send false if it is certain the rewardActionId in rank results will be shown to the user, therefore\r\nPersonalizer will expect a Reward call, otherwise it will assign the default\r\nReward to the event. Send true if it is possible the user will not see the action specified in the rank results,\r\n(e.g. 
because the page is rendering later, or the Rank results may be overridden by code further downstream).\r\nYou must call the Activate Event API if the event output is shown to users, otherwise Rewards will be ignored.", + "default": false, + "type": "boolean" + } + } + }, + "RankedAction": { + "description": "A ranked action with its resulting probability.", + "type": "object", + "properties": { + "id": { + "description": "Id of the action", + "maxLength": 256, + "type": "string", + "readOnly": true + }, + "probability": { + "format": "float", + "description": "Probability of the action", + "maximum": 1, + "minimum": 0, + "type": "number", + "readOnly": true + } + } + }, + "RankResponse": { + "description": "Returns which action to use as rewardActionId, and additional information about each action as a result of a Rank request.", + "type": "object", + "properties": { + "ranking": { + "description": "The calculated ranking for the current request.", + "type": "array", + "items": { + "$ref": "#/definitions/RankedAction" + }, + "readOnly": true + }, + "eventId": { + "description": "The eventId for the round trip from request to response.", + "maxLength": 256, + "type": "string", + "readOnly": true + }, + "rewardActionId": { + "description": "The action chosen by the Personalizer service.\r\nThis is the action your application should display, and for which to report the reward.\r\nThis might not be the first found in 'ranking'.", + "maxLength": 256, + "type": "string", + "readOnly": true + } + } + }, + "ServiceStatus": { + "type": "object", + "properties": { + "service": { + "type": "string" + }, + "apiStatus": { + "type": "string" + }, + "apiStatusMessage": { + "type": "string" + } + } + } + }, + "parameters": { + "Endpoint": { + "in": "path", + "name": "Endpoint", + "description": "Supported Cognitive Services endpoint.", + "required": true, + "type": "string", + "x-ms-parameter-location": "client", + "x-ms-skip-url-encoding": true + } + }, + "securityDefinitions": 
{ + "apim_key": { + "type": "apiKey", + "name": "Ocp-Apim-Subscription-Key", + "in": "header" + } + }, + "security": [ + {} + ], + "tags": [ + { + "name": "ConfigurationsV1Dot1Preview1", + "description": "Manages configuration operations." + }, + { + "name": "EvaluationsV1Dot1Preview1", + "description": "Manages counterfactual evaluation operations." + }, + { + "name": "MultiSlotEvents", + "description": "Manages multi-slot event operations." + }, + { + "name": "MultiSlotRank", + "description": "Manages multi-slot ranking operations." + }, + { + "name": "Events", + "description": "Manages event operations." + }, + { + "name": "Logs", + "description": "Manages reinforcement learning logs." + }, + { + "name": "Model", + "description": "Manages reinforcement learning configuration operations." + }, + { + "name": "Rank", + "description": "Manages ranking operations." + } + ], + "x-ms-parameterized-host": { + "hostTemplate": "{Endpoint}", + "useSchemePrefix": false, + "parameters": [ + { + "$ref": "#/parameters/Endpoint" + } + ] + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluation_Apply.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluation_Apply.json new file mode 100644 index 000000000000..17b6122af1f9 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluation_Apply.json @@ -0,0 +1,13 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "Endpoint": "{Endpoint}", + "body": { + "evaluationId": "b58c6d92-b727-48c1-9487-4be2782c9e0a", + "policyName": "online" + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Create.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Create.json new file mode 100644 index 
000000000000..59151af299b1 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Create.json @@ -0,0 +1,43 @@ +{ + "responses": { + "201": { + "headers": { + "Location": "{endpoint}/personalizer/v1.1-preview.1/evaluations/{evaluationId}" + }, + "body": { + "id": "b58c6d92-b727-48c1-9487-4be2782c9e0a", + "name": "myFirstEvaluation", + "startTime": "2018-12-19T00:00:00Z", + "endTime": "2019-01-19T00:00:00Z", + "status": "pending", + "policyResults": [ + { + "name": "Custom learning settings 1", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5", + "policySource": "Online" + } + ], + "featureImportance": [], + "evaluationType": "Manual", + "optimalPolicy": "", + "creationTime": "2019-01-20T00:00:00Z" + } + } + }, + "parameters": { + "Endpoint": "{Endpoint}", + "evaluation": { + "enableOfflineExperimentation": true, + "name": "myFirstEvaluation", + "startTime": "2018-12-19T00:00:00Z", + "endTime": "2019-01-19T00:00:00Z", + "policies": [ + { + "name": "Custom learning settings 1", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5" + } + ] + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Delete.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Delete.json new file mode 100644 index 000000000000..1b8773e08e38 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Delete.json @@ -0,0 +1,10 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "evaluationId": "id", + "Endpoint": "{Endpoint}", + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Get.json 
b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Get.json new file mode 100644 index 000000000000..11eb9fc459e3 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_Get.json @@ -0,0 +1,74 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": { + "id": "b58c6d92-b727-48c1-9487-4be2782c9e0a", + "name": "myFirstEvaluation", + "startTime": "2018-12-19T00:00:00Z", + "endTime": "2019-01-19T00:00:00Z", + "status": "completed", + "policyResults": [ + { + "name": "Custom learning settings 1", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5", + "policySource": "Custom", + "summary": [ + { + "timeStamp": "2018-12-19T00:00:00Z", + "ipsEstimatorNumerator": 0.0, + "ipsEstimatorDenominator": 170.0, + "snipsEstimatorDenominator": 308.25, + "aggregateTimeWindow": "PT0S", + "nonZeroProbability": 64.0, + "sumOfSquares": 0.0, + "confidenceInterval": 0.0, + "averageReward": 0.0 + }, + { + "timeStamp": "2018-12-19T00:05:00Z", + "ipsEstimatorNumerator": 2.2, + "ipsEstimatorDenominator": 196.0, + "snipsEstimatorDenominator": 193.761, + "aggregateTimeWindow": "PT0S", + "nonZeroProbability": 68.0, + "sumOfSquares": 2.424, + "confidenceInterval": 0.015529361, + "averageReward": 0.01122449 + } + ], + "totalSummary": { + "timeStamp": "2019-01-19T00:00:00Z", + "ipsEstimatorNumerator": 22.2, + "ipsEstimatorDenominator": 1906.0, + "snipsEstimatorDenominator": 1993.761, + "aggregateTimeWindow": "PT0S", + "nonZeroProbability": 68.0, + "sumOfSquares": 2.484, + "confidenceInterval": 0.0015344538, + "averageReward": 0.011647429 + } + } + ], + "featureImportance": [ + [ + "f1", + "f2" + ], + [ + "f3", + "f4" + ] + ], + "evaluationType": "Manual", + "optimalPolicy": "Custom learning settings 1", + "creationTime": "2019-01-20T00:00:00Z" + } + } + }, + "parameters": { + "evaluationId": "id", + "Endpoint": "{Endpoint}", + 
"Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_List.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_List.json new file mode 100644 index 000000000000..18fe22e94bf8 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Evaluations_List.json @@ -0,0 +1,54 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": [ + { + "id": "b58c6d92-b727-48c1-9487-4be2782c9e0a", + "name": "myFirstEvaluation", + "startTime": "2018-11-19T00:00:00Z", + "endTime": "2018-12-19T00:00:00Z", + "status": "completed", + "policyResults": [ + { + "name": "Custom learning settings 1", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5", + "policySource": "Online", + "totalSummary": { + "timeStamp": "2018-12-19T00:00:00Z", + "ipsEstimatorNumerator": 18.0, + "ipsEstimatorDenominator": 36.0, + "snipsEstimatorDenominator": 1.0, + "aggregateTimeWindow": "PT0S", + "nonZeroProbability": 1.1, + "sumOfSquares": 12.28, + "confidenceInterval": 0.100001775, + "averageReward": 0.5 + } + } + ], + "featureImportance": [], + "evaluationType": "Manual", + "optimalPolicy": "Custom learning settings 1", + "creationTime": "2019-01-19T00:00:00Z" + }, + { + "id": "21d03972-9130-4be9-8c8b-8ac3ec9b9dd1", + "name": "mySecondEvaluation", + "startTime": "2018-12-19T00:00:00Z", + "endTime": "2019-01-19T00:00:00Z", + "status": "pending", + "policyResults": [], + "featureImportance": [], + "evaluationType": "Manual", + "optimalPolicy": "", + "creationTime": "2019-01-19T00:00:00Z" + } + ] + } + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Events_Activate.json 
b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Events_Activate.json new file mode 100644 index 000000000000..5ad279ba6d85 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Events_Activate.json @@ -0,0 +1,10 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "eventId": "id", + "Endpoint": "{Endpoint}", + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Events_Reward.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Events_Reward.json new file mode 100644 index 000000000000..747305a2680f --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Events_Reward.json @@ -0,0 +1,13 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "eventId": "id", + "Endpoint": "{Endpoint}", + "reward": { + "value": 1.0 + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Log_Delete.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Log_Delete.json new file mode 100644 index 000000000000..3e1ff333b3b9 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Log_Delete.json @@ -0,0 +1,9 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Log_GetProperties.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Log_GetProperties.json new file mode 100644 index 000000000000..001264140641 --- /dev/null +++ 
b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Log_GetProperties.json @@ -0,0 +1,17 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": { + "dateRange": { + "from": "2019-01-18T16:00:00-08:00", + "to": "2019-02-18T16:00:00-08:00" + } + } + } + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_Get.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_Get.json new file mode 100644 index 000000000000..97276daa2c84 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_Get.json @@ -0,0 +1,12 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": "" + } + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_GetProperties.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_GetProperties.json new file mode 100644 index 000000000000..b6e1f1c38d8a --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_GetProperties.json @@ -0,0 +1,15 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": { + "creationTime": "2019-01-18T16:00:00-08:00", + "lastModifiedTime": "2019-01-18T16:00:00-08:00" + } + } + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_Reset.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_Reset.json new file mode 100644 index 000000000000..3e1ff333b3b9 --- /dev/null +++ 
b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Model_Reset.json @@ -0,0 +1,9 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlotEvents_Activate.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlotEvents_Activate.json new file mode 100644 index 000000000000..5ad279ba6d85 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlotEvents_Activate.json @@ -0,0 +1,10 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "eventId": "id", + "Endpoint": "{Endpoint}", + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlotEvents_Reward.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlotEvents_Reward.json new file mode 100644 index 000000000000..ce9f5ef2badd --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlotEvents_Reward.json @@ -0,0 +1,22 @@ +{ + "responses": { + "204": {} + }, + "parameters": { + "eventId": "id", + "Endpoint": "{Endpoint}", + "body": { + "reward": [ + { + "slotId": "Heroposition", + "value": 0.5 + }, + { + "slotId": "SideBar", + "value": 0.6 + } + ] + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlot_Rank.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlot_Rank.json new file mode 100644 index 000000000000..5d8cd1ec1db2 --- /dev/null +++ 
b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/MultiSlot_Rank.json @@ -0,0 +1,104 @@ +{ + "responses": { + "201": { + "headers": {}, + "body": { + "slots": [ + { + "id": "Main Article", + "rewardActionId": "EntertainmentArticle" + }, + { + "id": "Side Bar", + "rewardActionId": "SportsArticle" + } + ], + "eventId": "75269AD0-BFEE-4598-8196-C57383D38E10" + } + } + }, + "parameters": { + "Endpoint": "{Endpoint}", + "body": { + "contextFeatures": [ + { + "user": { + "profileType": "AnonymousUser", + "latLong": "47.6,-122.1" + } + }, + { + "environment": { + "dayOfMonth": "28", + "monthOfYear": "8", + "weather": "Sunny" + } + }, + { + "device": { + "mobile": true, + "windows": true + } + }, + { + "recentActivity": { + "itemsInCart": 3 + } + } + ], + "actions": [ + { + "id": "NewsArticle", + "features": [ + { + "type": "News" + } + ] + }, + { + "id": "SportsArticle", + "features": [ + { + "type": "Sports" + } + ] + }, + { + "id": "EntertainmentArticle", + "features": [ + { + "type": "Entertainment" + } + ] + } + ], + "slots": [ + { + "id": "Main Article", + "features": [ + { + "size": "Large", + "position": "Top Middle" + } + ], + "excludedActions": [ + "SportsArticle" + ], + "baselineAction": "EntertainmentArticle" + }, + { + "id": "Side Bar", + "features": [ + { + "size": "Small" + } + ], + "baselineAction": "NewsArticle" + } + ], + "eventId": "75269AD0-BFEE-4598-8196-C57383D38E10", + "deferActivation": false + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Get.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Get.json new file mode 100644 index 000000000000..a59c927b68dd --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Get.json @@ -0,0 +1,15 @@ +{ + "responses": { + "200": { + "headers": {}, + 
"body": { + "name": "myPersonalizer", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5" + } + } + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Reset.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Reset.json new file mode 100644 index 000000000000..a59c927b68dd --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Reset.json @@ -0,0 +1,15 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": { + "name": "myPersonalizer", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5" + } + } + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Update.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Update.json new file mode 100644 index 000000000000..00236ae7214a --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Policy_Update.json @@ -0,0 +1,19 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": { + "name": "myPersonalizer", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5" + } + } + }, + "parameters": { + "Endpoint": "{Endpoint}", + "policy": { + "name": "myPersonalizer", + "arguments": "--cb_explore_adf --epsilon 0.2 --dsjson --cb_type ips -l 0.5 --l1 1E-07 --power_t 0.5" + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Rank.json 
b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Rank.json new file mode 100644 index 000000000000..a3fd69260990 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/Rank.json @@ -0,0 +1,88 @@ +{ + "responses": { + "201": { + "headers": {}, + "body": { + "ranking": [ + { + "id": "EntertainmentArticle", + "probability": 0.8 + }, + { + "id": "SportsArticle", + "probability": 0.0 + }, + { + "id": "NewsArticle", + "probability": 0.2 + } + ], + "eventId": "75269AD0-BFEE-4598-8196-C57383D38E10", + "rewardActionId": "EntertainmentArticle" + } + } + }, + "parameters": { + "Endpoint": "{Endpoint}", + "rankRequest": { + "contextFeatures": [ + { + "user": { + "profileType": "AnonymousUser", + "latLong": "47.6,-122.1" + } + }, + { + "environment": { + "dayOfMonth": "28", + "monthOfYear": "8", + "weather": "Sunny" + } + }, + { + "device": { + "mobile": true, + "windows": true + } + }, + { + "recentActivity": { + "itemsInCart": 3 + } + } + ], + "actions": [ + { + "id": "NewsArticle", + "features": [ + { + "type": "News" + } + ] + }, + { + "id": "SportsArticle", + "features": [ + { + "type": "Sports" + } + ] + }, + { + "id": "EntertainmentArticle", + "features": [ + { + "type": "Entertainment" + } + ] + } + ], + "excludedActions": [ + "SportsArticle" + ], + "eventId": "75269AD0-BFEE-4598-8196-C57383D38E10", + "deferActivation": false + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/ServiceConfiguration_Get.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/ServiceConfiguration_Get.json new file mode 100644 index 000000000000..fd4a7629b683 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/ServiceConfiguration_Get.json @@ -0,0 +1,26 @@ +{ + "responses": { + "200": { + "headers": {}, 
+ "body": { + "rewardWaitTime": "PT10M", + "defaultReward": 0.0, + "rewardAggregation": "earliest", + "explorationPercentage": 0.2, + "modelExportFrequency": "PT5M", + "logMirrorEnabled": true, + "logMirrorSasUri": "https://testblob.blob.core.windows.net/container?se=2020-08-13T00%3A00Z&sp=rwl&spr=https&sv=2018-11-09&sr=c&sig=signature", + "logRetentionDays": 7, + "lastConfigurationEditDate": "0001-01-01T00:00:00Z", + "learningMode": "Online", + "isAutoOptimizationEnabled": true, + "autoOptimizationFrequency": "P7D", + "autoOptimizationStartDate": "2019-01-19T00:00:00Z" + } + } + }, + "parameters": { + "Ocp-Apim-Subscription-Key": "{API key}", + "Endpoint": "{Endpoint}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/ServiceConfiguration_Update.json b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/ServiceConfiguration_Update.json new file mode 100644 index 000000000000..d7282c6809a4 --- /dev/null +++ b/specification/cognitiveservices/data-plane/Personalizer/preview/v1.1-preview.1/examples/ServiceConfiguration_Update.json @@ -0,0 +1,41 @@ +{ + "responses": { + "200": { + "headers": {}, + "body": { + "rewardWaitTime": "PT10M", + "defaultReward": 0.0, + "rewardAggregation": "earliest", + "explorationPercentage": 0.2, + "modelExportFrequency": "PT5M", + "logMirrorEnabled": true, + "logMirrorSasUri": "https://testblob.blob.core.windows.net/container?se=2020-08-13T00%3A00Z&sp=rwl&spr=https&sv=2018-11-09&sr=c&sig=signature", + "logRetentionDays": 7, + "lastConfigurationEditDate": "0001-01-01T00:00:00Z", + "learningMode": "Online", + "isAutoOptimizationEnabled": true, + "autoOptimizationFrequency": "P7D", + "autoOptimizationStartDate": "2019-01-19T00:00:00Z" + } + } + }, + "parameters": { + "Endpoint": "{Endpoint}", + "config": { + "rewardWaitTime": "PT10M", + "defaultReward": 0.0, + "rewardAggregation": "earliest", + "explorationPercentage": 0.2, + "modelExportFrequency": 
"PT5M", + "logMirrorEnabled": true, + "logMirrorSasUri": "https://testblob.blob.core.windows.net/container?se=2020-08-13T00%3A00Z&sp=rwl&spr=https&sv=2018-11-09&sr=c&sig=signature", + "logRetentionDays": 7, + "lastConfigurationEditDate": "0001-01-01T00:00:00Z", + "learningMode": "Online", + "isAutoOptimizationEnabled": true, + "autoOptimizationFrequency": "P7D", + "autoOptimizationStartDate": "2019-01-19T00:00:00Z" + }, + "Ocp-Apim-Subscription-Key": "{API key}" + } +} diff --git a/specification/cognitiveservices/data-plane/Personalizer/readme.md b/specification/cognitiveservices/data-plane/Personalizer/readme.md index 888fcc5d55a7..612669f6ca1b 100644 --- a/specification/cognitiveservices/data-plane/Personalizer/readme.md +++ b/specification/cognitiveservices/data-plane/Personalizer/readme.md @@ -28,6 +28,14 @@ input-file: - preview/v1.0/Personalizer.json ``` +### Release 1.0-Preview +These settings apply only when `--tag=release_1_1_preview.1` is specified on the command line. + +``` yaml $(tag) == 'release_1_1_preview.1' +input-file: + - preview/v1.1-preview.1/Personalizer.json +``` + ## Swagger to SDK This section describes what SDK should be generated by the automatic system.