From 3ff9443c96bb1b2d628863013de7cda15132be0b Mon Sep 17 00:00:00 2001
From: SDKAuto
Date: Wed, 30 Jun 2021 02:00:42 +0000
Subject: [PATCH] CodeGen from PR 14970 in Azure/azure-rest-api-specs

[Hub Generated] Review request for Microsoft.Media to add version stable/2021-06-01 Part-1 (#14970)

* Adds base for updating Microsoft.Media from version stable/2021-05-01 to version 2021-06-01
* Updates readme
* Updates API version in new specs and examples
* Update the branch with copy of all other services 2020-05-01 version files except encoding.
* update AMS to 2021-06-01 Part1 (All swaggers except encoding)
* fix some lint errors.
* Fix an unreferenced file.
* fix more linting errors
* remove some unreferenced files.
* Fix systemData in streamingservice
* fix more linting errors
* fix 404s
* fix more lint errors
* fix 204 issue for delete
* fix definitions to use v2 common-types
* remove type for the live event list list defenition
* Revert "remove type for the live event list list defenition"

This reverts commit 0f66253d284284037bf2c0d7a04c947d4dca7715.

* removed unused definition
---
 sdk/media/azure-mgmt-media/_meta.json | 2 +-
 .../azure/mgmt/media/_azure_media_services.py | 21 +-
 .../azure/mgmt/media/models/__init__.py | 303 +-
 .../models/_azure_media_services_enums.py | 277 +-
 .../azure/mgmt/media/models/_models.py | 3679 ++--------------
 .../azure/mgmt/media/models/_models_py3.py | 3723 ++---------------
 .../azure/mgmt/media/models/_paged_models.py | 55 +-
 .../azure/mgmt/media/operations/__init__.py | 8 +-
 .../operations/_account_filters_operations.py | 26 +-
 .../operations/_asset_filters_operations.py | 26 +-
 .../media/operations/_assets_operations.py | 38 +-
 .../_content_key_policies_operations.py | 32 +-
 .../mgmt/media/operations/_jobs_operations.py | 468 ---
 .../operations/_live_events_operations.py | 42 +-
 .../operations/_live_outputs_operations.py | 22 +-
 .../media/operations/_locations_operations.py | 8 +-
 .../operations/_mediaservices_operations.py | 36 +-
 .../mgmt/media/operations/_operations.py | 81 +-
 ...private_endpoint_connections_operations.py | 22 +-
 .../_private_link_resources_operations.py | 12 +-
 .../_streaming_endpoints_operations.py | 38 +-
 .../_streaming_locators_operations.py | 30 +-
 .../_streaming_policies_operations.py | 22 +-
 .../operations/_transforms_operations.py | 402 --
 24 files changed, 936 insertions(+), 8437 deletions(-)
 delete mode 100644 sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py
 delete mode 100644 sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py

diff --git a/sdk/media/azure-mgmt-media/_meta.json b/sdk/media/azure-mgmt-media/_meta.json
index 0ef5900c072e..1701cfa48a84 100644
--- a/sdk/media/azure-mgmt-media/_meta.json
+++ b/sdk/media/azure-mgmt-media/_meta.json
@@ -1,7 +1,7 @@
 {
   "autorest": "V2",
   "use": "@microsoft.azure/autorest.python@~4.0.71",
-  "commit": "d78e99bbaa8170a72c38d8a6dad8a896f9a5c636",
+  "commit": "68c2d7c5e56c4c70e46d7158f19cdbf29dbdbfc4",
   "repository_url": "https://github.com/Azure/azure-rest-api-specs",
   "autorest_command": "autorest specification/mediaservices/resource-manager/readme.md --keep-version-file --multiapi --no-async --python --python-mode=update --python-sdks-folder=/home/vsts/work/1/s/azure-sdk-for-python/sdk --use=@microsoft.azure/autorest.python@~4.0.71 --version=V2",
   "readme": "specification/mediaservices/resource-manager/readme.md"
diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py
b/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py index aba56940f2d1..582f849e3069 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py @@ -13,17 +13,15 @@ from msrest import Serializer, Deserializer from ._configuration import AzureMediaServicesConfiguration -from .operations import AccountFiltersOperations from .operations import Operations from .operations import MediaservicesOperations from .operations import PrivateLinkResourcesOperations from .operations import PrivateEndpointConnectionsOperations from .operations import LocationsOperations +from .operations import AccountFiltersOperations from .operations import AssetsOperations from .operations import AssetFiltersOperations from .operations import ContentKeyPoliciesOperations -from .operations import TransformsOperations -from .operations import JobsOperations from .operations import StreamingPoliciesOperations from .operations import StreamingLocatorsOperations from .operations import LiveEventsOperations @@ -38,8 +36,6 @@ class AzureMediaServices(SDKClient): :ivar config: Configuration for client. :vartype config: AzureMediaServicesConfiguration - :ivar account_filters: AccountFilters operations - :vartype account_filters: azure.mgmt.media.operations.AccountFiltersOperations :ivar operations: Operations operations :vartype operations: azure.mgmt.media.operations.Operations :ivar mediaservices: Mediaservices operations @@ -50,16 +46,14 @@ class AzureMediaServices(SDKClient): :vartype private_endpoint_connections: azure.mgmt.media.operations.PrivateEndpointConnectionsOperations :ivar locations: Locations operations :vartype locations: azure.mgmt.media.operations.LocationsOperations + :ivar account_filters: AccountFilters operations + :vartype account_filters: azure.mgmt.media.operations.AccountFiltersOperations :ivar assets: Assets operations :vartype assets: azure.mgmt.media.operations.AssetsOperations :ivar asset_filters: AssetFilters operations :vartype asset_filters: azure.mgmt.media.operations.AssetFiltersOperations :ivar content_key_policies: ContentKeyPolicies operations :vartype content_key_policies: azure.mgmt.media.operations.ContentKeyPoliciesOperations - :ivar transforms: Transforms operations - :vartype transforms: azure.mgmt.media.operations.TransformsOperations - :ivar jobs: Jobs operations - :vartype jobs: azure.mgmt.media.operations.JobsOperations :ivar streaming_policies: StreamingPolicies operations :vartype streaming_policies: azure.mgmt.media.operations.StreamingPoliciesOperations :ivar streaming_locators: StreamingLocators operations @@ -87,11 +81,10 @@ def __init__( super(AzureMediaServices, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2021-06-01' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) - self.account_filters = AccountFiltersOperations( - self._client, self.config, self._serialize, self._deserialize) self.operations = Operations( self._client, self.config, self._serialize, self._deserialize) self.mediaservices = MediaservicesOperations( @@ -102,16 +95,14 @@ def __init__( self._client, self.config, self._serialize, self._deserialize) self.locations = LocationsOperations( self._client, self.config, self._serialize, self._deserialize) + self.account_filters = AccountFiltersOperations( + self._client, self.config, 
self._serialize, self._deserialize) self.assets = AssetsOperations( self._client, self.config, self._serialize, self._deserialize) self.asset_filters = AssetFiltersOperations( self._client, self.config, self._serialize, self._deserialize) self.content_key_policies = ContentKeyPoliciesOperations( self._client, self.config, self._serialize, self._deserialize) - self.transforms = TransformsOperations( - self._client, self.config, self._serialize, self._deserialize) - self.jobs = JobsOperations( - self._client, self.config, self._serialize, self._deserialize) self.streaming_policies = StreamingPoliciesOperations( self._client, self.config, self._serialize, self._deserialize) self.streaming_locators = StreamingLocatorsOperations( diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py index 60f6a467199f..9b4b7a3c252d 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py @@ -10,30 +10,20 @@ # -------------------------------------------------------------------------- try: - from ._models_py3 import AacAudio - from ._models_py3 import AbsoluteClipTime from ._models_py3 import AccessControl from ._models_py3 import AccountEncryption from ._models_py3 import AccountFilter from ._models_py3 import AkamaiAccessControl from ._models_py3 import AkamaiSignatureHeaderAuthenticationKey - from ._models_py3 import ApiError, ApiErrorException from ._models_py3 import Asset from ._models_py3 import AssetContainerSas from ._models_py3 import AssetFileEncryptionMetadata from ._models_py3 import AssetFilter from ._models_py3 import AssetStreamingLocator - from ._models_py3 import Audio - from ._models_py3 import AudioAnalyzerPreset - from ._models_py3 import AudioOverlay - from ._models_py3 import AudioTrackDescriptor from ._models_py3 import AzureEntityResource - from ._models_py3 import BuiltInStandardEncoderPreset from ._models_py3 import CbcsDrmConfiguration from ._models_py3 import CencDrmConfiguration from ._models_py3 import CheckNameAvailabilityInput - from ._models_py3 import ClipTime - from ._models_py3 import Codec from ._models_py3 import CommonEncryptionCbcs from ._models_py3 import CommonEncryptionCenc from ._models_py3 import ContentKeyPolicy @@ -61,54 +51,25 @@ from ._models_py3 import ContentKeyPolicyUnknownRestriction from ._models_py3 import ContentKeyPolicyWidevineConfiguration from ._models_py3 import ContentKeyPolicyX509CertificateTokenKey - from ._models_py3 import CopyAudio - from ._models_py3 import CopyVideo from ._models_py3 import CrossSiteAccessPolicies from ._models_py3 import DefaultKey - from ._models_py3 import Deinterlace from ._models_py3 import EdgePolicies from ._models_py3 import EdgeUsageDataCollectionPolicy from ._models_py3 import EdgeUsageDataEventHub from ._models_py3 import EnabledProtocols from ._models_py3 import EntityNameAvailabilityCheckOutput from ._models_py3 import EnvelopeEncryption - from ._models_py3 import FaceDetectorPreset - from ._models_py3 import Filters + from ._models_py3 import ErrorAdditionalInfo + from ._models_py3 import ErrorDetail + from ._models_py3 import ErrorResponse, ErrorResponseException from ._models_py3 import FilterTrackPropertyCondition from ._models_py3 import FilterTrackSelection from ._models_py3 import FirstQuality - from ._models_py3 import Format - from ._models_py3 import FromAllInputFile - from ._models_py3 import FromEachInputFile - from ._models_py3 
import H264Layer - from ._models_py3 import H264Video - from ._models_py3 import H265Layer - from ._models_py3 import H265Video - from ._models_py3 import H265VideoLayer from ._models_py3 import Hls - from ._models_py3 import Image - from ._models_py3 import ImageFormat - from ._models_py3 import InputDefinition - from ._models_py3 import InputFile from ._models_py3 import IPAccessControl from ._models_py3 import IPRange - from ._models_py3 import Job - from ._models_py3 import JobError - from ._models_py3 import JobErrorDetail - from ._models_py3 import JobInput - from ._models_py3 import JobInputAsset - from ._models_py3 import JobInputClip - from ._models_py3 import JobInputHttp - from ._models_py3 import JobInputs - from ._models_py3 import JobInputSequence - from ._models_py3 import JobOutput - from ._models_py3 import JobOutputAsset - from ._models_py3 import JpgFormat - from ._models_py3 import JpgImage - from ._models_py3 import JpgLayer from ._models_py3 import KeyDelivery from ._models_py3 import KeyVaultProperties - from ._models_py3 import Layer from ._models_py3 import ListContainerSasInput from ._models_py3 import ListContentKeysResponse from ._models_py3 import ListEdgePoliciesInput @@ -126,26 +87,17 @@ from ._models_py3 import LiveEventPreviewAccessControl from ._models_py3 import LiveEventTranscription from ._models_py3 import LiveOutput - from ._models_py3 import Location from ._models_py3 import LogSpecification from ._models_py3 import MediaService from ._models_py3 import MediaServiceIdentity from ._models_py3 import MediaServiceUpdate from ._models_py3 import MetricDimension from ._models_py3 import MetricSpecification - from ._models_py3 import Mp4Format - from ._models_py3 import MultiBitrateFormat from ._models_py3 import NoEncryption - from ._models_py3 import ODataError from ._models_py3 import Operation + from ._models_py3 import OperationCollection from ._models_py3 import OperationDisplay - from ._models_py3 import OutputFile - from ._models_py3 import Overlay - from ._models_py3 import PngFormat - from ._models_py3 import PngImage - from ._models_py3 import PngLayer from ._models_py3 import PresentationTimeRange - from ._models_py3 import Preset from ._models_py3 import PrivateEndpoint from ._models_py3 import PrivateEndpointConnection from ._models_py3 import PrivateEndpointConnectionListResult @@ -153,16 +105,10 @@ from ._models_py3 import PrivateLinkResourceListResult from ._models_py3 import PrivateLinkServiceConnectionState from ._models_py3 import Properties - from ._models_py3 import Provider from ._models_py3 import ProxyResource - from ._models_py3 import Rectangle from ._models_py3 import Resource - from ._models_py3 import SelectAudioTrackByAttribute - from ._models_py3 import SelectAudioTrackById - from ._models_py3 import SelectVideoTrackByAttribute - from ._models_py3 import SelectVideoTrackById + from ._models_py3 import ResourceIdentity from ._models_py3 import ServiceSpecification - from ._models_py3 import StandardEncoderPreset from ._models_py3 import StorageAccount from ._models_py3 import StorageEncryptedAssetDecryptionData from ._models_py3 import StreamingEndpoint @@ -179,44 +125,25 @@ from ._models_py3 import StreamingPolicyWidevineConfiguration from ._models_py3 import SyncStorageKeysInput from ._models_py3 import SystemData - from ._models_py3 import TrackDescriptor from ._models_py3 import TrackedResource from ._models_py3 import TrackPropertyCondition from ._models_py3 import TrackSelection - from ._models_py3 import Transform - 
from ._models_py3 import TransformOutput - from ._models_py3 import TransportStreamFormat - from ._models_py3 import UtcClipTime - from ._models_py3 import Video - from ._models_py3 import VideoAnalyzerPreset - from ._models_py3 import VideoLayer - from ._models_py3 import VideoOverlay - from ._models_py3 import VideoTrackDescriptor + from ._models_py3 import UserAssignedManagedIdentity except (SyntaxError, ImportError): - from ._models import AacAudio - from ._models import AbsoluteClipTime from ._models import AccessControl from ._models import AccountEncryption from ._models import AccountFilter from ._models import AkamaiAccessControl from ._models import AkamaiSignatureHeaderAuthenticationKey - from ._models import ApiError, ApiErrorException from ._models import Asset from ._models import AssetContainerSas from ._models import AssetFileEncryptionMetadata from ._models import AssetFilter from ._models import AssetStreamingLocator - from ._models import Audio - from ._models import AudioAnalyzerPreset - from ._models import AudioOverlay - from ._models import AudioTrackDescriptor from ._models import AzureEntityResource - from ._models import BuiltInStandardEncoderPreset from ._models import CbcsDrmConfiguration from ._models import CencDrmConfiguration from ._models import CheckNameAvailabilityInput - from ._models import ClipTime - from ._models import Codec from ._models import CommonEncryptionCbcs from ._models import CommonEncryptionCenc from ._models import ContentKeyPolicy @@ -244,54 +171,25 @@ from ._models import ContentKeyPolicyUnknownRestriction from ._models import ContentKeyPolicyWidevineConfiguration from ._models import ContentKeyPolicyX509CertificateTokenKey - from ._models import CopyAudio - from ._models import CopyVideo from ._models import CrossSiteAccessPolicies from ._models import DefaultKey - from ._models import Deinterlace from ._models import EdgePolicies from ._models import EdgeUsageDataCollectionPolicy from ._models import EdgeUsageDataEventHub from ._models import EnabledProtocols from ._models import EntityNameAvailabilityCheckOutput from ._models import EnvelopeEncryption - from ._models import FaceDetectorPreset - from ._models import Filters + from ._models import ErrorAdditionalInfo + from ._models import ErrorDetail + from ._models import ErrorResponse, ErrorResponseException from ._models import FilterTrackPropertyCondition from ._models import FilterTrackSelection from ._models import FirstQuality - from ._models import Format - from ._models import FromAllInputFile - from ._models import FromEachInputFile - from ._models import H264Layer - from ._models import H264Video - from ._models import H265Layer - from ._models import H265Video - from ._models import H265VideoLayer from ._models import Hls - from ._models import Image - from ._models import ImageFormat - from ._models import InputDefinition - from ._models import InputFile from ._models import IPAccessControl from ._models import IPRange - from ._models import Job - from ._models import JobError - from ._models import JobErrorDetail - from ._models import JobInput - from ._models import JobInputAsset - from ._models import JobInputClip - from ._models import JobInputHttp - from ._models import JobInputs - from ._models import JobInputSequence - from ._models import JobOutput - from ._models import JobOutputAsset - from ._models import JpgFormat - from ._models import JpgImage - from ._models import JpgLayer from ._models import KeyDelivery from ._models import KeyVaultProperties - from 
._models import Layer from ._models import ListContainerSasInput from ._models import ListContentKeysResponse from ._models import ListEdgePoliciesInput @@ -309,26 +207,17 @@ from ._models import LiveEventPreviewAccessControl from ._models import LiveEventTranscription from ._models import LiveOutput - from ._models import Location from ._models import LogSpecification from ._models import MediaService from ._models import MediaServiceIdentity from ._models import MediaServiceUpdate from ._models import MetricDimension from ._models import MetricSpecification - from ._models import Mp4Format - from ._models import MultiBitrateFormat from ._models import NoEncryption - from ._models import ODataError from ._models import Operation + from ._models import OperationCollection from ._models import OperationDisplay - from ._models import OutputFile - from ._models import Overlay - from ._models import PngFormat - from ._models import PngImage - from ._models import PngLayer from ._models import PresentationTimeRange - from ._models import Preset from ._models import PrivateEndpoint from ._models import PrivateEndpointConnection from ._models import PrivateEndpointConnectionListResult @@ -336,16 +225,10 @@ from ._models import PrivateLinkResourceListResult from ._models import PrivateLinkServiceConnectionState from ._models import Properties - from ._models import Provider from ._models import ProxyResource - from ._models import Rectangle from ._models import Resource - from ._models import SelectAudioTrackByAttribute - from ._models import SelectAudioTrackById - from ._models import SelectVideoTrackByAttribute - from ._models import SelectVideoTrackById + from ._models import ResourceIdentity from ._models import ServiceSpecification - from ._models import StandardEncoderPreset from ._models import StorageAccount from ._models import StorageEncryptedAssetDecryptionData from ._models import StreamingEndpoint @@ -362,36 +245,21 @@ from ._models import StreamingPolicyWidevineConfiguration from ._models import SyncStorageKeysInput from ._models import SystemData - from ._models import TrackDescriptor from ._models import TrackedResource from ._models import TrackPropertyCondition from ._models import TrackSelection - from ._models import Transform - from ._models import TransformOutput - from ._models import TransportStreamFormat - from ._models import UtcClipTime - from ._models import Video - from ._models import VideoAnalyzerPreset - from ._models import VideoLayer - from ._models import VideoOverlay - from ._models import VideoTrackDescriptor + from ._models import UserAssignedManagedIdentity from ._paged_models import AccountFilterPaged from ._paged_models import AssetFilterPaged from ._paged_models import AssetPaged from ._paged_models import ContentKeyPolicyPaged -from ._paged_models import JobPaged from ._paged_models import LiveEventPaged from ._paged_models import LiveOutputPaged from ._paged_models import MediaServicePaged -from ._paged_models import OperationPaged from ._paged_models import StreamingEndpointPaged from ._paged_models import StreamingLocatorPaged from ._paged_models import StreamingPolicyPaged -from ._paged_models import TransformPaged from ._azure_media_services_enums import ( - FilterTrackPropertyType, - FilterTrackPropertyCompareOperation, - CreatedByType, MetricUnit, MetricAggregationType, ActionType, @@ -399,9 +267,12 @@ StorageAuthentication, AccountEncryptionKeyType, DefaultAction, - ManagedIdentityType, + PublicNetworkAccess, + CreatedByType, 
PrivateEndpointConnectionProvisioningState, PrivateEndpointServiceConnectionStatus, + FilterTrackPropertyType, + FilterTrackPropertyCompareOperation, AssetStorageEncryptionFormat, AssetContainerPermission, ContentKeyPolicyPlayReadyUnknownOutputPassingOption, @@ -409,32 +280,6 @@ ContentKeyPolicyPlayReadyContentType, ContentKeyPolicyRestrictionTokenType, ContentKeyPolicyFairPlayRentalAndLeaseKeyType, - AacAudioProfile, - H265VideoProfile, - StretchMode, - VideoSyncMode, - H265Complexity, - ChannelMapping, - TrackAttribute, - AttributeFilter, - AnalysisResolution, - FaceRedactorMode, - BlurType, - AudioAnalysisMode, - DeinterlaceParity, - DeinterlaceMode, - Rotation, - H264VideoProfile, - EntropyMode, - H264Complexity, - EncoderNamedPreset, - InsightsType, - OnErrorType, - Priority, - JobErrorCode, - JobErrorCategory, - JobRetry, - JobState, TrackPropertyType, TrackPropertyCompareOperation, StreamingLocatorContentKeyType, @@ -443,36 +288,27 @@ LiveOutputResourceState, LiveEventInputProtocol, LiveEventEncodingType, + StretchMode, LiveEventResourceState, StreamOptionsFlag, StreamingEndpointResourceState, ) __all__ = [ - 'AacAudio', - 'AbsoluteClipTime', 'AccessControl', 'AccountEncryption', 'AccountFilter', 'AkamaiAccessControl', 'AkamaiSignatureHeaderAuthenticationKey', - 'ApiError', 'ApiErrorException', 'Asset', 'AssetContainerSas', 'AssetFileEncryptionMetadata', 'AssetFilter', 'AssetStreamingLocator', - 'Audio', - 'AudioAnalyzerPreset', - 'AudioOverlay', - 'AudioTrackDescriptor', 'AzureEntityResource', - 'BuiltInStandardEncoderPreset', 'CbcsDrmConfiguration', 'CencDrmConfiguration', 'CheckNameAvailabilityInput', - 'ClipTime', - 'Codec', 'CommonEncryptionCbcs', 'CommonEncryptionCenc', 'ContentKeyPolicy', @@ -500,54 +336,25 @@ 'ContentKeyPolicyUnknownRestriction', 'ContentKeyPolicyWidevineConfiguration', 'ContentKeyPolicyX509CertificateTokenKey', - 'CopyAudio', - 'CopyVideo', 'CrossSiteAccessPolicies', 'DefaultKey', - 'Deinterlace', 'EdgePolicies', 'EdgeUsageDataCollectionPolicy', 'EdgeUsageDataEventHub', 'EnabledProtocols', 'EntityNameAvailabilityCheckOutput', 'EnvelopeEncryption', - 'FaceDetectorPreset', - 'Filters', + 'ErrorAdditionalInfo', + 'ErrorDetail', + 'ErrorResponse', 'ErrorResponseException', 'FilterTrackPropertyCondition', 'FilterTrackSelection', 'FirstQuality', - 'Format', - 'FromAllInputFile', - 'FromEachInputFile', - 'H264Layer', - 'H264Video', - 'H265Layer', - 'H265Video', - 'H265VideoLayer', 'Hls', - 'Image', - 'ImageFormat', - 'InputDefinition', - 'InputFile', 'IPAccessControl', 'IPRange', - 'Job', - 'JobError', - 'JobErrorDetail', - 'JobInput', - 'JobInputAsset', - 'JobInputClip', - 'JobInputHttp', - 'JobInputs', - 'JobInputSequence', - 'JobOutput', - 'JobOutputAsset', - 'JpgFormat', - 'JpgImage', - 'JpgLayer', 'KeyDelivery', 'KeyVaultProperties', - 'Layer', 'ListContainerSasInput', 'ListContentKeysResponse', 'ListEdgePoliciesInput', @@ -565,26 +372,17 @@ 'LiveEventPreviewAccessControl', 'LiveEventTranscription', 'LiveOutput', - 'Location', 'LogSpecification', 'MediaService', 'MediaServiceIdentity', 'MediaServiceUpdate', 'MetricDimension', 'MetricSpecification', - 'Mp4Format', - 'MultiBitrateFormat', 'NoEncryption', - 'ODataError', 'Operation', + 'OperationCollection', 'OperationDisplay', - 'OutputFile', - 'Overlay', - 'PngFormat', - 'PngImage', - 'PngLayer', 'PresentationTimeRange', - 'Preset', 'PrivateEndpoint', 'PrivateEndpointConnection', 'PrivateEndpointConnectionListResult', @@ -592,16 +390,10 @@ 'PrivateLinkResourceListResult', 'PrivateLinkServiceConnectionState', 
'Properties', - 'Provider', 'ProxyResource', - 'Rectangle', 'Resource', - 'SelectAudioTrackByAttribute', - 'SelectAudioTrackById', - 'SelectVideoTrackByAttribute', - 'SelectVideoTrackById', + 'ResourceIdentity', 'ServiceSpecification', - 'StandardEncoderPreset', 'StorageAccount', 'StorageEncryptedAssetDecryptionData', 'StreamingEndpoint', @@ -618,35 +410,20 @@ 'StreamingPolicyWidevineConfiguration', 'SyncStorageKeysInput', 'SystemData', - 'TrackDescriptor', 'TrackedResource', 'TrackPropertyCondition', 'TrackSelection', - 'Transform', - 'TransformOutput', - 'TransportStreamFormat', - 'UtcClipTime', - 'Video', - 'VideoAnalyzerPreset', - 'VideoLayer', - 'VideoOverlay', - 'VideoTrackDescriptor', - 'AccountFilterPaged', - 'OperationPaged', + 'UserAssignedManagedIdentity', 'MediaServicePaged', + 'AccountFilterPaged', 'AssetPaged', 'AssetFilterPaged', 'ContentKeyPolicyPaged', - 'TransformPaged', - 'JobPaged', 'StreamingPolicyPaged', 'StreamingLocatorPaged', 'LiveEventPaged', 'LiveOutputPaged', 'StreamingEndpointPaged', - 'FilterTrackPropertyType', - 'FilterTrackPropertyCompareOperation', - 'CreatedByType', 'MetricUnit', 'MetricAggregationType', 'ActionType', @@ -654,9 +431,12 @@ 'StorageAuthentication', 'AccountEncryptionKeyType', 'DefaultAction', - 'ManagedIdentityType', + 'PublicNetworkAccess', + 'CreatedByType', 'PrivateEndpointConnectionProvisioningState', 'PrivateEndpointServiceConnectionStatus', + 'FilterTrackPropertyType', + 'FilterTrackPropertyCompareOperation', 'AssetStorageEncryptionFormat', 'AssetContainerPermission', 'ContentKeyPolicyPlayReadyUnknownOutputPassingOption', @@ -664,32 +444,6 @@ 'ContentKeyPolicyPlayReadyContentType', 'ContentKeyPolicyRestrictionTokenType', 'ContentKeyPolicyFairPlayRentalAndLeaseKeyType', - 'AacAudioProfile', - 'H265VideoProfile', - 'StretchMode', - 'VideoSyncMode', - 'H265Complexity', - 'ChannelMapping', - 'TrackAttribute', - 'AttributeFilter', - 'AnalysisResolution', - 'FaceRedactorMode', - 'BlurType', - 'AudioAnalysisMode', - 'DeinterlaceParity', - 'DeinterlaceMode', - 'Rotation', - 'H264VideoProfile', - 'EntropyMode', - 'H264Complexity', - 'EncoderNamedPreset', - 'InsightsType', - 'OnErrorType', - 'Priority', - 'JobErrorCode', - 'JobErrorCategory', - 'JobRetry', - 'JobState', 'TrackPropertyType', 'TrackPropertyCompareOperation', 'StreamingLocatorContentKeyType', @@ -698,6 +452,7 @@ 'LiveOutputResourceState', 'LiveEventInputProtocol', 'LiveEventEncodingType', + 'StretchMode', 'LiveEventResourceState', 'StreamOptionsFlag', 'StreamingEndpointResourceState', diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py index 5513e1cab62d..50a6a6f05690 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py @@ -12,30 +12,6 @@ from enum import Enum -class FilterTrackPropertyType(str, Enum): - - unknown = "Unknown" #: The unknown track property type. - type = "Type" #: The type. - name = "Name" #: The name. - language = "Language" #: The language. - four_cc = "FourCC" #: The fourCC. - bitrate = "Bitrate" #: The bitrate. - - -class FilterTrackPropertyCompareOperation(str, Enum): - - equal = "Equal" #: The equal operation. - not_equal = "NotEqual" #: The not equal operation. 
- - -class CreatedByType(str, Enum): - - user = "User" - application = "Application" - managed_identity = "ManagedIdentity" - key = "Key" - - class MetricUnit(str, Enum): bytes = "Bytes" #: The number of bytes. @@ -79,10 +55,18 @@ class DefaultAction(str, Enum): deny = "Deny" #: Public IP addresses are blocked. -class ManagedIdentityType(str, Enum): +class PublicNetworkAccess(str, Enum): - system_assigned = "SystemAssigned" #: A system-assigned managed identity. - none = "None" #: No managed identity. + enabled = "Enabled" #: Public network access is enabled. + disabled = "Disabled" #: Public network access is disabled. + + +class CreatedByType(str, Enum): + + user = "User" + application = "Application" + managed_identity = "ManagedIdentity" + key = "Key" class PrivateEndpointConnectionProvisioningState(str, Enum): @@ -100,6 +84,22 @@ class PrivateEndpointServiceConnectionStatus(str, Enum): rejected = "Rejected" +class FilterTrackPropertyType(str, Enum): + + unknown = "Unknown" #: The unknown track property type. + type = "Type" #: The type. + name = "Name" #: The name. + language = "Language" #: The language. + four_cc = "FourCC" #: The fourCC. + bitrate = "Bitrate" #: The bitrate. + + +class FilterTrackPropertyCompareOperation(str, Enum): + + equal = "Equal" #: The equal operation. + not_equal = "NotEqual" #: The not equal operation. + + class AssetStorageEncryptionFormat(str, Enum): none = "None" #: The Asset does not use client-side storage encryption (this is the only allowed value for new Assets). @@ -152,220 +152,6 @@ class ContentKeyPolicyFairPlayRentalAndLeaseKeyType(str, Enum): persistent_limited = "PersistentLimited" #: Content key can be persisted and the valid duration is limited by the Rental Duration value -class AacAudioProfile(str, Enum): - - aac_lc = "AacLc" #: Specifies that the output audio is to be encoded into AAC Low Complexity profile (AAC-LC). - he_aac_v1 = "HeAacV1" #: Specifies that the output audio is to be encoded into HE-AAC v1 profile. - he_aac_v2 = "HeAacV2" #: Specifies that the output audio is to be encoded into HE-AAC v2 profile. - - -class H265VideoProfile(str, Enum): - - auto = "Auto" #: Tells the encoder to automatically determine the appropriate H.265 profile. - main = "Main" #: Main profile (https://x265.readthedocs.io/en/default/cli.html?highlight=profile#profile-level-tier) - - -class StretchMode(str, Enum): - - none = "None" #: Strictly respect the output resolution without considering the pixel aspect ratio or display aspect ratio of the input video. - auto_size = "AutoSize" #: Override the output resolution, and change it to match the display aspect ratio of the input, without padding. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the value in the preset is overridden, and the output will be at 1280x720, which maintains the input aspect ratio of 16:9. - auto_fit = "AutoFit" #: Pad the output (with either letterbox or pillar box) to honor the output resolution, while ensuring that the active video region in the output has the same aspect ratio as the input. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the output will be at 1280x1280, which contains an inner rectangle of 1280x720 at aspect ratio of 16:9, and pillar box regions 280 pixels wide at the left and right. - - -class VideoSyncMode(str, Enum): - - auto = "Auto" #: This is the default method. Chooses between Cfr and Vfr depending on muxer capabilities. For output format MP4, the default mode is Cfr. 
- passthrough = "Passthrough" #: The presentation timestamps on frames are passed through from the input file to the output file writer. Recommended when the input source has variable frame rate, and are attempting to produce multiple layers for adaptive streaming in the output which have aligned GOP boundaries. Note: if two or more frames in the input have duplicate timestamps, then the output will also have the same behavior - cfr = "Cfr" #: Input frames will be repeated and/or dropped as needed to achieve exactly the requested constant frame rate. Recommended when the output frame rate is explicitly set at a specified value - vfr = "Vfr" #: Similar to the Passthrough mode, but if the input has frames that have duplicate timestamps, then only one frame is passed through to the output, and others are dropped. Recommended when the number of output frames is expected to be equal to the number of input frames. For example, the output is used to calculate a quality metric like PSNR against the input - - -class H265Complexity(str, Enum): - - speed = "Speed" #: Tells the encoder to use settings that are optimized for faster encoding. Quality is sacrificed to decrease encoding time. - balanced = "Balanced" #: Tells the encoder to use settings that achieve a balance between speed and quality. - quality = "Quality" #: Tells the encoder to use settings that are optimized to produce higher quality output at the expense of slower overall encode time. - - -class ChannelMapping(str, Enum): - - front_left = "FrontLeft" #: The Front Left Channel. - front_right = "FrontRight" #: The Front Right Channel. - center = "Center" #: The Center Channel. - low_frequency_effects = "LowFrequencyEffects" #: Low Frequency Effects Channel. Sometimes referred to as the Subwoofer. - back_left = "BackLeft" #: The Back Left Channel. Sometimes referred to as the Left Surround Channel. - back_right = "BackRight" #: The Back Right Channel. Sometimes referred to as the Right Surround Channel. - stereo_left = "StereoLeft" #: The Left Stereo channel. Sometimes referred to as Down Mix Left. - stereo_right = "StereoRight" #: The Right Stereo channel. Sometimes referred to as Down Mix Right. - - -class TrackAttribute(str, Enum): - - bitrate = "Bitrate" #: The bitrate of the track. - language = "Language" #: The language of the track. - - -class AttributeFilter(str, Enum): - - all = "All" #: All tracks will be included. - top = "Top" #: The first track will be included when the attribute is sorted in descending order. Generally used to select the largest bitrate. - bottom = "Bottom" #: The first track will be included when the attribute is sorted in ascending order. Generally used to select the smallest bitrate. - value_equals = "ValueEquals" #: Any tracks that have an attribute equal to the value given will be included. - - -class AnalysisResolution(str, Enum): - - source_resolution = "SourceResolution" - standard_definition = "StandardDefinition" - - -class FaceRedactorMode(str, Enum): - - analyze = "Analyze" #: Analyze mode detects faces and outputs a metadata file with the results. Allows editing of the metadata file before faces are blurred with Redact mode. - redact = "Redact" #: Redact mode consumes the metadata file from Analyze mode and redacts the faces found. - combined = "Combined" #: Combined mode does the Analyze and Redact steps in one pass when editing the analyzed faces is not desired. 
- - -class BlurType(str, Enum): - - box = "Box" #: Box: debug filter, bounding box only - low = "Low" #: Low: box-car blur filter - med = "Med" #: Med: Gaussian blur filter - high = "High" #: High: Confuse blur filter - black = "Black" #: Black: Black out filter - - -class AudioAnalysisMode(str, Enum): - - standard = "Standard" #: Performs all operations included in the Basic mode, additionally performing language detection and speaker diarization. - basic = "Basic" #: This mode performs speech-to-text transcription and generation of a VTT subtitle/caption file. The output of this mode includes an Insights JSON file including only the keywords, transcription,and timing information. Automatic language detection and speaker diarization are not included in this mode. - - -class DeinterlaceParity(str, Enum): - - auto = "Auto" #: Automatically detect the order of fields - top_field_first = "TopFieldFirst" #: Apply top field first processing of input video. - bottom_field_first = "BottomFieldFirst" #: Apply bottom field first processing of input video. - - -class DeinterlaceMode(str, Enum): - - off = "Off" #: Disables de-interlacing of the source video. - auto_pixel_adaptive = "AutoPixelAdaptive" #: Apply automatic pixel adaptive de-interlacing on each frame in the input video. - - -class Rotation(str, Enum): - - auto = "Auto" #: Automatically detect and rotate as needed. - none = "None" #: Do not rotate the video. If the output format supports it, any metadata about rotation is kept intact. - rotate0 = "Rotate0" #: Do not rotate the video but remove any metadata about the rotation. - rotate90 = "Rotate90" #: Rotate 90 degrees clockwise. - rotate180 = "Rotate180" #: Rotate 180 degrees clockwise. - rotate270 = "Rotate270" #: Rotate 270 degrees clockwise. - - -class H264VideoProfile(str, Enum): - - auto = "Auto" #: Tells the encoder to automatically determine the appropriate H.264 profile. - baseline = "Baseline" #: Baseline profile - main = "Main" #: Main profile - high = "High" #: High profile. - high422 = "High422" #: High 4:2:2 profile. - high444 = "High444" #: High 4:4:4 predictive profile. - - -class EntropyMode(str, Enum): - - cabac = "Cabac" #: Context Adaptive Binary Arithmetic Coder (CABAC) entropy encoding. - cavlc = "Cavlc" #: Context Adaptive Variable Length Coder (CAVLC) entropy encoding. - - -class H264Complexity(str, Enum): - - speed = "Speed" #: Tells the encoder to use settings that are optimized for faster encoding. Quality is sacrificed to decrease encoding time. - balanced = "Balanced" #: Tells the encoder to use settings that achieve a balance between speed and quality. - quality = "Quality" #: Tells the encoder to use settings that are optimized to produce higher quality output at the expense of slower overall encode time. - - -class EncoderNamedPreset(str, Enum): - - h264_single_bitrate_sd = "H264SingleBitrateSD" #: Produces an MP4 file where the video is encoded with H.264 codec at 2200 kbps and a picture height of 480 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - h264_single_bitrate720p = "H264SingleBitrate720p" #: Produces an MP4 file where the video is encoded with H.264 codec at 4500 kbps and a picture height of 720 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - h264_single_bitrate1080p = "H264SingleBitrate1080p" #: Produces an MP4 file where the video is encoded with H.264 codec at 6750 kbps and a picture height of 1080 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. 
- adaptive_streaming = "AdaptiveStreaming" #: Produces a set of GOP aligned MP4 files with H.264 video and stereo AAC audio. Auto-generates a bitrate ladder based on the input resolution, bitrate and frame rate. The auto-generated preset will never exceed the input resolution. For example, if the input is 720p, output will remain 720p at best. - aac_good_quality_audio = "AACGoodQualityAudio" #: Produces a single MP4 file containing only stereo audio encoded at 192 kbps. - content_aware_encoding_experimental = "ContentAwareEncodingExperimental" #: Exposes an experimental preset for content-aware encoding. Given any input content, the service attempts to automatically determine the optimal number of layers, appropriate bitrate and resolution settings for delivery by adaptive streaming. The underlying algorithms will continue to evolve over time. The output will contain MP4 files with video and audio interleaved. - content_aware_encoding = "ContentAwareEncoding" #: Produces a set of GOP-aligned MP4s by using content-aware encoding. Given any input content, the service performs an initial lightweight analysis of the input content, and uses the results to determine the optimal number of layers, appropriate bitrate and resolution settings for delivery by adaptive streaming. This preset is particularly effective for low and medium complexity videos, where the output files will be at lower bitrates but at a quality that still delivers a good experience to viewers. The output will contain MP4 files with video and audio interleaved. - copy_all_bitrate_non_interleaved = "CopyAllBitrateNonInterleaved" #: Copy all video and audio streams from the input asset as non-interleaved video and audio output files. This preset can be used to clip an existing asset or convert a group of key frame (GOP) aligned MP4 files as an asset that can be streamed. - h264_multiple_bitrate1080p = "H264MultipleBitrate1080p" #: Produces a set of 8 GOP-aligned MP4 files, ranging from 6000 kbps to 400 kbps, and stereo AAC audio. Resolution starts at 1080p and goes down to 180p. - h264_multiple_bitrate720p = "H264MultipleBitrate720p" #: Produces a set of 6 GOP-aligned MP4 files, ranging from 3400 kbps to 400 kbps, and stereo AAC audio. Resolution starts at 720p and goes down to 180p. - h264_multiple_bitrate_sd = "H264MultipleBitrateSD" #: Produces a set of 5 GOP-aligned MP4 files, ranging from 1900kbps to 400 kbps, and stereo AAC audio. Resolution starts at 480p and goes down to 240p. - h265_content_aware_encoding = "H265ContentAwareEncoding" #: Produces a set of GOP-aligned MP4s by using content-aware encoding. Given any input content, the service performs an initial lightweight analysis of the input content, and uses the results to determine the optimal number of layers, appropriate bitrate and resolution settings for delivery by adaptive streaming. This preset is particularly effective for low and medium complexity videos, where the output files will be at lower bitrates but at a quality that still delivers a good experience to viewers. The output will contain MP4 files with video and audio interleaved. - h265_adaptive_streaming = "H265AdaptiveStreaming" #: Produces a set of GOP aligned MP4 files with H.265 video and stereo AAC audio. Auto-generates a bitrate ladder based on the input resolution, bitrate and frame rate. The auto-generated preset will never exceed the input resolution. For example, if the input is 720p, output will remain 720p at best. 
- h265_single_bitrate720p = "H265SingleBitrate720p" #: Produces an MP4 file where the video is encoded with H.265 codec at 1800 kbps and a picture height of 720 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - h265_single_bitrate1080p = "H265SingleBitrate1080p" #: Produces an MP4 file where the video is encoded with H.265 codec at 3500 kbps and a picture height of 1080 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - h265_single_bitrate4_k = "H265SingleBitrate4K" #: Produces an MP4 file where the video is encoded with H.265 codec at 9500 kbps and a picture height of 2160 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - - -class InsightsType(str, Enum): - - audio_insights_only = "AudioInsightsOnly" #: Generate audio only insights. Ignore video even if present. Fails if no audio is present. - video_insights_only = "VideoInsightsOnly" #: Generate video only insights. Ignore audio if present. Fails if no video is present. - all_insights = "AllInsights" #: Generate both audio and video insights. Fails if either audio or video Insights fail. - - -class OnErrorType(str, Enum): - - stop_processing_job = "StopProcessingJob" #: Tells the service that if this TransformOutput fails, then any other incomplete TransformOutputs can be stopped. - continue_job = "ContinueJob" #: Tells the service that if this TransformOutput fails, then allow any other TransformOutput to continue. - - -class Priority(str, Enum): - - low = "Low" #: Used for TransformOutputs that can be generated after Normal and High priority TransformOutputs. - normal = "Normal" #: Used for TransformOutputs that can be generated at Normal priority. - high = "High" #: Used for TransformOutputs that should take precedence over others. - - -class JobErrorCode(str, Enum): - - service_error = "ServiceError" #: Fatal service error, please contact support. - service_transient_error = "ServiceTransientError" #: Transient error, please retry, if retry is unsuccessful, please contact support. - download_not_accessible = "DownloadNotAccessible" #: While trying to download the input files, the files were not accessible, please check the availability of the source. - download_transient_error = "DownloadTransientError" #: While trying to download the input files, there was an issue during transfer (storage service, network errors), see details and check your source. - upload_not_accessible = "UploadNotAccessible" #: While trying to upload the output files, the destination was not reachable, please check the availability of the destination. - upload_transient_error = "UploadTransientError" #: While trying to upload the output files, there was an issue during transfer (storage service, network errors), see details and check your destination. - configuration_unsupported = "ConfigurationUnsupported" #: There was a problem with the combination of input files and the configuration settings applied, fix the configuration settings and retry with the same input, or change input to match the configuration. - content_malformed = "ContentMalformed" #: There was a problem with the input content (for example: zero byte files, or corrupt/non-decodable files), check the input files. - content_unsupported = "ContentUnsupported" #: There was a problem with the format of the input (not valid media file, or an unsupported file/codec), check the validity of the input files. - - -class JobErrorCategory(str, Enum): - - service = "Service" #: The error is service related. 
-    download = "Download" #: The error is download related.
-    upload = "Upload" #: The error is upload related.
-    configuration = "Configuration" #: The error is configuration related.
-    content = "Content" #: The error is related to data in the input files.
-
-
-class JobRetry(str, Enum):
-
-    do_not_retry = "DoNotRetry" #: Issue needs to be investigated and then the job resubmitted with corrections or retried once the underlying issue has been corrected.
-    may_retry = "MayRetry" #: Issue may be resolved after waiting for a period of time and resubmitting the same Job.
-
-
-class JobState(str, Enum):
-
-    canceled = "Canceled" #: The job was canceled. This is a final state for the job.
-    canceling = "Canceling" #: The job is in the process of being canceled. This is a transient state for the job.
-    error = "Error" #: The job has encountered an error. This is a final state for the job.
-    finished = "Finished" #: The job is finished. This is a final state for the job.
-    processing = "Processing" #: The job is processing. This is a transient state for the job.
-    queued = "Queued" #: The job is in a queued state, waiting for resources to become available. This is a transient state.
-    scheduled = "Scheduled" #: The job is being scheduled to run on an available resource. This is a transient state, between queued and processing states.
-
-
 class TrackPropertyType(str, Enum):

     unknown = "Unknown" #: Unknown track property
@@ -419,6 +205,15 @@ class LiveEventEncodingType(str, Enum):
     none = "None" #: A contribution live encoder sends a multiple bitrate stream. The ingested stream passes through the live event without any further processing. It is also called the pass-through mode.
     standard = "Standard" #: A contribution live encoder sends a single bitrate stream to the live event and Media Services creates multiple bitrate streams. The output cannot exceed 720p in resolution.
     premium1080p = "Premium1080p" #: A contribution live encoder sends a single bitrate stream to the live event and Media Services creates multiple bitrate streams. The output cannot exceed 1080p in resolution.
+    passthrough_basic = "PassthroughBasic" #: Pending update...
+    passthrough_standard = "PassthroughStandard" #: Pending update...
+
+
+class StretchMode(str, Enum):
+
+    none = "None" #: Strictly respects the output resolution specified in the encoding preset without considering the pixel aspect ratio or display aspect ratio of the input video.
+    auto_size = "AutoSize" #: Override the output resolution, and change it to match the display aspect ratio of the input, without padding. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the value in the preset is overridden, and the output will be at 1280x720, which maintains the input aspect ratio of 16:9.
+    auto_fit = "AutoFit" #: Pad the output (with either letterbox or pillar box) to honor the output resolution, while ensuring that the active video region in the output has the same aspect ratio as the input. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the output will be at 1280x1280, which contains an inner rectangle of 1280x720 at aspect ratio of 16:9, and pillar box regions 280 pixels wide at the left and right.

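The two pass-through values added above still carry the spec's placeholder text ("Pending update..."); by name they correspond to basic and standard pass-through tiers, where the contribution feed is forwarded to outputs without re-encoding. Below is a minimal, hypothetical sketch of how these new enum members and the relocated StretchMode would be consumed from this package. It assumes the generated LiveEventEncoding model exposes encoding_type and stretch_mode keyword arguments, as it does in neighbouring API versions; it is not part of the generated code in this patch.

```python
# Hypothetical usage sketch (not part of the generated diff). Assumes the
# LiveEventEncoding model in azure.mgmt.media.models accepts encoding_type and
# stretch_mode, as in adjacent API versions of this SDK.
from azure.mgmt.media.models import (
    LiveEventEncoding,
    LiveEventEncodingType,
    StretchMode,
)

# New pass-through tier: the contribution feed is forwarded without re-encoding.
passthrough_encoding = LiveEventEncoding(
    encoding_type=LiveEventEncodingType.passthrough_standard,
)

# For encoding live events, StretchMode controls how the input picture is fit
# to the requested output resolution (respect it strictly, auto-size, or pad).
standard_encoding = LiveEventEncoding(
    encoding_type=LiveEventEncodingType.standard,
    stretch_mode=StretchMode.auto_fit,
)
```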
class LiveEventResourceState(str, Enum): diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py index dde71d02f903..78e3857ef7d9 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py @@ -13,188 +13,6 @@ from msrest.exceptions import HttpOperationError -class Codec(Model): - """Describes the basic properties of all codecs. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Audio, Video, CopyVideo, CopyAudio - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.Audio': 'Audio', '#Microsoft.Media.Video': 'Video', '#Microsoft.Media.CopyVideo': 'CopyVideo', '#Microsoft.Media.CopyAudio': 'CopyAudio'} - } - - def __init__(self, **kwargs): - super(Codec, self).__init__(**kwargs) - self.label = kwargs.get('label', None) - self.odatatype = None - - -class Audio(Codec): - """Defines the common properties for all audio codecs. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AacAudio - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channels: The number of channels in the audio. - :type channels: int - :param sampling_rate: The sampling rate to use for encoding in hertz. - :type sampling_rate: int - :param bitrate: The bitrate, in bits per second, of the output encoded - audio. - :type bitrate: int - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channels': {'key': 'channels', 'type': 'int'}, - 'sampling_rate': {'key': 'samplingRate', 'type': 'int'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AacAudio': 'AacAudio'} - } - - def __init__(self, **kwargs): - super(Audio, self).__init__(**kwargs) - self.channels = kwargs.get('channels', None) - self.sampling_rate = kwargs.get('sampling_rate', None) - self.bitrate = kwargs.get('bitrate', None) - self.odatatype = '#Microsoft.Media.Audio' - - -class AacAudio(Audio): - """Describes Advanced Audio Codec (AAC) audio encoding settings. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channels: The number of channels in the audio. - :type channels: int - :param sampling_rate: The sampling rate to use for encoding in hertz. - :type sampling_rate: int - :param bitrate: The bitrate, in bits per second, of the output encoded - audio. 
- :type bitrate: int - :param profile: The encoding profile to be used when encoding audio with - AAC. Possible values include: 'AacLc', 'HeAacV1', 'HeAacV2' - :type profile: str or ~azure.mgmt.media.models.AacAudioProfile - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channels': {'key': 'channels', 'type': 'int'}, - 'sampling_rate': {'key': 'samplingRate', 'type': 'int'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'profile': {'key': 'profile', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AacAudio, self).__init__(**kwargs) - self.profile = kwargs.get('profile', None) - self.odatatype = '#Microsoft.Media.AacAudio' - - -class ClipTime(Model): - """Base class for specifying a clip time. Use sub classes of this class to - specify the time position in the media. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AbsoluteClipTime, UtcClipTime - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AbsoluteClipTime': 'AbsoluteClipTime', '#Microsoft.Media.UtcClipTime': 'UtcClipTime'} - } - - def __init__(self, **kwargs): - super(ClipTime, self).__init__(**kwargs) - self.odatatype = None - - -class AbsoluteClipTime(ClipTime): - """Specifies the clip time as an absolute time position in the media file. - The absolute time can point to a different position depending on whether - the media file starts from a timestamp of zero or not. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param time: Required. The time position on the timeline of the input - media. It is usually specified as an ISO8601 period. e.g PT30S for 30 - seconds. - :type time: timedelta - """ - - _validation = { - 'odatatype': {'required': True}, - 'time': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'time': {'key': 'time', 'type': 'duration'}, - } - - def __init__(self, **kwargs): - super(AbsoluteClipTime, self).__init__(**kwargs) - self.time = kwargs.get('time', None) - self.odatatype = '#Microsoft.Media.AbsoluteClipTime' - - class AccessControl(Model): """AccessControl. @@ -221,6 +39,9 @@ def __init__(self, **kwargs): class AccountEncryption(Model): """AccountEncryption. + Variables are only populated by the server, and will be ignored when + sending a request. + All required parameters must be populated in order to send to Azure. :param type: Required. The type of key used to encrypt the Account Key. @@ -229,21 +50,30 @@ class AccountEncryption(Model): :param key_vault_properties: The properties of the key used to encrypt the account. :type key_vault_properties: ~azure.mgmt.media.models.KeyVaultProperties + :param identity: The Key Vault identity. + :type identity: ~azure.mgmt.media.models.ResourceIdentity + :ivar status: The current status of the Key Vault mapping. 
+ :vartype status: str """ _validation = { 'type': {'required': True}, + 'status': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, + 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, + 'status': {'key': 'status', 'type': 'str'}, } def __init__(self, **kwargs): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs.get('type', None) self.key_vault_properties = kwargs.get('key_vault_properties', None) + self.identity = kwargs.get('identity', None) + self.status = None class Resource(Model): @@ -411,34 +241,6 @@ def __init__(self, **kwargs): self.expiration = kwargs.get('expiration', None) -class ApiError(Model): - """The API error. - - :param error: The error properties. - :type error: ~azure.mgmt.media.models.ODataError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'ODataError'}, - } - - def __init__(self, **kwargs): - super(ApiError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class ApiErrorException(HttpOperationError): - """Server responsed with exception of type: 'ApiError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, deserialize, response, *args): - - super(ApiErrorException, self).__init__(deserialize, response, 'ApiError', *args) - - class Asset(ProxyResource): """An Asset. @@ -671,430 +473,105 @@ def __init__(self, **kwargs): self.default_content_key_policy_name = None -class Preset(Model): - """Base type for all Presets, which define the recipe or instructions on how - the input media files should be processed. +class AzureEntityResource(Resource): + """Entity Resource. - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FaceDetectorPreset, AudioAnalyzerPreset, - BuiltInStandardEncoderPreset, StandardEncoderPreset + The resource model definition for an Azure Resource Manager resource with + an etag. - All required parameters must be populated in order to send to Azure. + Variables are only populated by the server, and will be ignored when + sending a request. - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + :vartype id: str + :ivar name: The name of the resource + :vartype name: str + :ivar type: The type of the resource. E.g. + "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + :vartype type: str + :ivar etag: Resource Etag. 
+ :vartype etag: str """ _validation = { - 'odatatype': {'required': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, } _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.FaceDetectorPreset': 'FaceDetectorPreset', '#Microsoft.Media.AudioAnalyzerPreset': 'AudioAnalyzerPreset', '#Microsoft.Media.BuiltInStandardEncoderPreset': 'BuiltInStandardEncoderPreset', '#Microsoft.Media.StandardEncoderPreset': 'StandardEncoderPreset'} + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, } def __init__(self, **kwargs): - super(Preset, self).__init__(**kwargs) - self.odatatype = None - - -class AudioAnalyzerPreset(Preset): - """The Audio Analyzer preset applies a pre-defined set of AI-based analysis - operations, including speech transcription. Currently, the preset supports - processing of content with a single audio track. + super(AzureEntityResource, self).__init__(**kwargs) + self.etag = None - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: VideoAnalyzerPreset - All required parameters must be populated in order to send to Azure. +class CbcsDrmConfiguration(Model): + """Class to specify DRM configurations of CommonEncryptionCbcs scheme in + Streaming Policy. - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param audio_language: The language for the audio payload in the input - using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you - know the language of your content, it is recommended that you specify it. - The language must be specified explicitly for AudioAnalysisMode::Basic, - since automatic language detection is not included in basic mode. If the - language isn't specified or set to null, automatic language detection will - choose the first language detected and process with the selected language - for the duration of the file. It does not currently support dynamically - switching between languages after the first language is detected. The - automatic detection works best with audio recordings with clearly - discernable speech. If automatic detection fails to find the language, - transcription would fallback to 'en-US'." The list of supported languages - is available here: https://go.microsoft.com/fwlink/?linkid=2109463 - :type audio_language: str - :param mode: Determines the set of audio analysis operations to be - performed. If unspecified, the Standard AudioAnalysisMode would be chosen. 
- Possible values include: 'Standard', 'Basic' - :type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode - :param experimental_options: Dictionary containing key value pairs for - parameters not exposed in the preset itself - :type experimental_options: dict[str, str] + :param fair_play: FairPlay configurations + :type fair_play: + ~azure.mgmt.media.models.StreamingPolicyFairPlayConfiguration + :param play_ready: PlayReady configurations + :type play_ready: + ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration + :param widevine: Widevine configurations + :type widevine: + ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration """ - _validation = { - 'odatatype': {'required': True}, - } - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'audio_language': {'key': 'audioLanguage', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.VideoAnalyzerPreset': 'VideoAnalyzerPreset'} + 'fair_play': {'key': 'fairPlay', 'type': 'StreamingPolicyFairPlayConfiguration'}, + 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, + 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, } def __init__(self, **kwargs): - super(AudioAnalyzerPreset, self).__init__(**kwargs) - self.audio_language = kwargs.get('audio_language', None) - self.mode = kwargs.get('mode', None) - self.experimental_options = kwargs.get('experimental_options', None) - self.odatatype = '#Microsoft.Media.AudioAnalyzerPreset' - - -class Overlay(Model): - """Base type for all overlays - image, audio or video. + super(CbcsDrmConfiguration, self).__init__(**kwargs) + self.fair_play = kwargs.get('fair_play', None) + self.play_ready = kwargs.get('play_ready', None) + self.widevine = kwargs.get('widevine', None) - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AudioOverlay, VideoOverlay - All required parameters must be populated in order to send to Azure. +class CencDrmConfiguration(Model): + """Class to specify DRM configurations of CommonEncryptionCenc scheme in + Streaming Policy. - :param input_label: Required. The label of the job input which is to be - used as an overlay. The Input must specify exactly one file. You can - specify an image file in JPG, PNG, GIF or BMP format, or an audio file - (such as a WAV, MP3, WMA or M4A file), or a video file. See - https://aka.ms/mesformats for the complete list of supported audio and - video file formats. - :type input_label: str - :param start: The start position, with reference to the input video, at - which the overlay starts. The value should be in ISO 8601 format. For - example, PT05S to start the overlay at 5 seconds into the input video. If - not specified the overlay starts from the beginning of the input video. - :type start: timedelta - :param end: The end position, with reference to the input video, at which - the overlay ends. The value should be in ISO 8601 format. For example, - PT30S to end the overlay at 30 seconds into the input video. If not - specified or the value is greater than the input video duration, the - overlay will be applied until the end of the input video if the overlay - media duration is greater than the input video duration, else the overlay - will last as long as the overlay media duration. 
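
The CommonEncryptionCbcs/CommonEncryptionCenc containers kept in this hunk only carry references to the per-DRM configuration models. A minimal sketch of wiring the CENC case together follows; only `play_ready` and `widevine` appear in this hunk, so the `custom_license_acquisition_url_template` keyword on the configuration models is an assumption.

    from azure.mgmt.media.models import (
        CencDrmConfiguration,
        StreamingPolicyPlayReadyConfiguration,
        StreamingPolicyWidevineConfiguration,
    )

    # CommonEncryptionCenc: PlayReady + Widevine license delivery for a
    # StreamingPolicy. The template keyword is an assumed field name.
    cenc = CencDrmConfiguration(
        play_ready=StreamingPolicyPlayReadyConfiguration(
            custom_license_acquisition_url_template="https://license.contoso.com/playready/{ContentKeyId}",
        ),
        widevine=StreamingPolicyWidevineConfiguration(
            custom_license_acquisition_url_template="https://license.contoso.com/widevine/{ContentKeyId}",
        ),
    )
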
- :type end: timedelta - :param fade_in_duration: The duration over which the overlay fades in onto - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade in (same as PT0S). - :type fade_in_duration: timedelta - :param fade_out_duration: The duration over which the overlay fades out of - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade out (same as PT0S). - :type fade_out_duration: timedelta - :param audio_gain_level: The gain level of audio in the overlay. The value - should be in the range [0, 1.0]. The default is 1.0. - :type audio_gain_level: float - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :param play_ready: PlayReady configurations + :type play_ready: + ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration + :param widevine: Widevine configurations + :type widevine: + ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration """ - _validation = { - 'input_label': {'required': True}, - 'odatatype': {'required': True}, - } - _attribute_map = { - 'input_label': {'key': 'inputLabel', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'duration'}, - 'end': {'key': 'end', 'type': 'duration'}, - 'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'}, - 'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'}, - 'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AudioOverlay': 'AudioOverlay', '#Microsoft.Media.VideoOverlay': 'VideoOverlay'} + 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, + 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, } def __init__(self, **kwargs): - super(Overlay, self).__init__(**kwargs) - self.input_label = kwargs.get('input_label', None) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - self.fade_in_duration = kwargs.get('fade_in_duration', None) - self.fade_out_duration = kwargs.get('fade_out_duration', None) - self.audio_gain_level = kwargs.get('audio_gain_level', None) - self.odatatype = None - + super(CencDrmConfiguration, self).__init__(**kwargs) + self.play_ready = kwargs.get('play_ready', None) + self.widevine = kwargs.get('widevine', None) -class AudioOverlay(Overlay): - """Describes the properties of an audio overlay. - All required parameters must be populated in order to send to Azure. +class CheckNameAvailabilityInput(Model): + """The input to the check name availability request. - :param input_label: Required. The label of the job input which is to be - used as an overlay. The Input must specify exactly one file. You can - specify an image file in JPG, PNG, GIF or BMP format, or an audio file - (such as a WAV, MP3, WMA or M4A file), or a video file. See - https://aka.ms/mesformats for the complete list of supported audio and - video file formats. - :type input_label: str - :param start: The start position, with reference to the input video, at - which the overlay starts. The value should be in ISO 8601 format. For - example, PT05S to start the overlay at 5 seconds into the input video. If - not specified the overlay starts from the beginning of the input video. - :type start: timedelta - :param end: The end position, with reference to the input video, at which - the overlay ends. 
The value should be in ISO 8601 format. For example, - PT30S to end the overlay at 30 seconds into the input video. If not - specified or the value is greater than the input video duration, the - overlay will be applied until the end of the input video if the overlay - media duration is greater than the input video duration, else the overlay - will last as long as the overlay media duration. - :type end: timedelta - :param fade_in_duration: The duration over which the overlay fades in onto - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade in (same as PT0S). - :type fade_in_duration: timedelta - :param fade_out_duration: The duration over which the overlay fades out of - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade out (same as PT0S). - :type fade_out_duration: timedelta - :param audio_gain_level: The gain level of audio in the overlay. The value - should be in the range [0, 1.0]. The default is 1.0. - :type audio_gain_level: float - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'input_label': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'input_label': {'key': 'inputLabel', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'duration'}, - 'end': {'key': 'end', 'type': 'duration'}, - 'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'}, - 'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'}, - 'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AudioOverlay, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.AudioOverlay' - - -class TrackDescriptor(Model): - """Base type for all TrackDescriptor types, which define the metadata and - selection for tracks that should be processed by a Job. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AudioTrackDescriptor, VideoTrackDescriptor - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AudioTrackDescriptor': 'AudioTrackDescriptor', '#Microsoft.Media.VideoTrackDescriptor': 'VideoTrackDescriptor'} - } - - def __init__(self, **kwargs): - super(TrackDescriptor, self).__init__(**kwargs) - self.odatatype = None - - -class AudioTrackDescriptor(TrackDescriptor): - """A TrackSelection to select audio tracks. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SelectAudioTrackByAttribute, SelectAudioTrackById - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channel_mapping: Optional designation for single channel audio - tracks. Can be used to combine the tracks into stereo or multi-channel - audio tracks. 
Possible values include: 'FrontLeft', 'FrontRight', - 'Center', 'LowFrequencyEffects', 'BackLeft', 'BackRight', 'StereoLeft', - 'StereoRight' - :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.SelectAudioTrackByAttribute': 'SelectAudioTrackByAttribute', '#Microsoft.Media.SelectAudioTrackById': 'SelectAudioTrackById'} - } - - def __init__(self, **kwargs): - super(AudioTrackDescriptor, self).__init__(**kwargs) - self.channel_mapping = kwargs.get('channel_mapping', None) - self.odatatype = '#Microsoft.Media.AudioTrackDescriptor' - - -class AzureEntityResource(Resource): - """Entity Resource. - - The resource model definition for an Azure Resource Manager resource with - an etag. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - :vartype id: str - :ivar name: The name of the resource - :vartype name: str - :ivar type: The type of the resource. E.g. - "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - :vartype type: str - :ivar etag: Resource Etag. - :vartype etag: str - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AzureEntityResource, self).__init__(**kwargs) - self.etag = None - - -class BuiltInStandardEncoderPreset(Preset): - """Describes a built-in preset for encoding the input video with the Standard - Encoder. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param preset_name: Required. The built-in preset to be used for encoding - videos. Possible values include: 'H264SingleBitrateSD', - 'H264SingleBitrate720p', 'H264SingleBitrate1080p', 'AdaptiveStreaming', - 'AACGoodQualityAudio', 'ContentAwareEncodingExperimental', - 'ContentAwareEncoding', 'CopyAllBitrateNonInterleaved', - 'H264MultipleBitrate1080p', 'H264MultipleBitrate720p', - 'H264MultipleBitrateSD', 'H265ContentAwareEncoding', - 'H265AdaptiveStreaming', 'H265SingleBitrate720p', - 'H265SingleBitrate1080p', 'H265SingleBitrate4K' - :type preset_name: str or ~azure.mgmt.media.models.EncoderNamedPreset - """ - - _validation = { - 'odatatype': {'required': True}, - 'preset_name': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'preset_name': {'key': 'presetName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(BuiltInStandardEncoderPreset, self).__init__(**kwargs) - self.preset_name = kwargs.get('preset_name', None) - self.odatatype = '#Microsoft.Media.BuiltInStandardEncoderPreset' - - -class CbcsDrmConfiguration(Model): - """Class to specify DRM configurations of CommonEncryptionCbcs scheme in - Streaming Policy. 
- - :param fair_play: FairPlay configurations - :type fair_play: - ~azure.mgmt.media.models.StreamingPolicyFairPlayConfiguration - :param play_ready: PlayReady configurations - :type play_ready: - ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration - :param widevine: Widevine configurations - :type widevine: - ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration - """ - - _attribute_map = { - 'fair_play': {'key': 'fairPlay', 'type': 'StreamingPolicyFairPlayConfiguration'}, - 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, - 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, - } - - def __init__(self, **kwargs): - super(CbcsDrmConfiguration, self).__init__(**kwargs) - self.fair_play = kwargs.get('fair_play', None) - self.play_ready = kwargs.get('play_ready', None) - self.widevine = kwargs.get('widevine', None) - - -class CencDrmConfiguration(Model): - """Class to specify DRM configurations of CommonEncryptionCenc scheme in - Streaming Policy. - - :param play_ready: PlayReady configurations - :type play_ready: - ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration - :param widevine: Widevine configurations - :type widevine: - ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration - """ - - _attribute_map = { - 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, - 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, - } - - def __init__(self, **kwargs): - super(CencDrmConfiguration, self).__init__(**kwargs) - self.play_ready = kwargs.get('play_ready', None) - self.widevine = kwargs.get('widevine', None) - - -class CheckNameAvailabilityInput(Model): - """The input to the check name availability request. - - :param name: The account name. - :type name: str - :param type: The account type. For a Media Services account, this should - be 'MediaServices'. - :type type: str + :param name: The account name. + :type name: str + :param type: The account type. For a Media Services account, this should + be 'MediaServices'. + :type type: str """ _attribute_map = { @@ -2096,59 +1573,6 @@ def __init__(self, **kwargs): self.odatatype = '#Microsoft.Media.ContentKeyPolicyX509CertificateTokenKey' -class CopyAudio(Codec): - """A codec flag, which tells the encoder to copy the input audio bitstream. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(CopyAudio, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.CopyAudio' - - -class CopyVideo(Codec): - """A codec flag, which tells the encoder to copy the input video bitstream - without re-encoding. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. 
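
A hedged sketch for the CheckNameAvailabilityInput model retained above: the `client` object is assumed to be an already-constructed AzureMediaServices instance, and whether LocationsOperations.check_name_availability takes the model or flattened keywords is an assumption, not something this hunk shows.

    from azure.mgmt.media.models import CheckNameAvailabilityInput

    probe = CheckNameAvailabilityInput(name="contosomedia", type="MediaServices")

    # 'client' assumed to exist; the flattened-keyword call shape is an assumption.
    result = client.locations.check_name_availability(
        "westus2", name=probe.name, type=probe.type
    )
    print(result.name_available, result.reason, result.message)
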
- :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(CopyVideo, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.CopyVideo' - - class CrossSiteAccessPolicies(Model): """The client access policy. @@ -2193,28 +1617,6 @@ def __init__(self, **kwargs): self.policy_name = kwargs.get('policy_name', None) -class Deinterlace(Model): - """Describes the de-interlacing settings. - - :param parity: The field parity for de-interlacing, defaults to Auto. - Possible values include: 'Auto', 'TopFieldFirst', 'BottomFieldFirst' - :type parity: str or ~azure.mgmt.media.models.DeinterlaceParity - :param mode: The deinterlacing mode. Defaults to AutoPixelAdaptive. - Possible values include: 'Off', 'AutoPixelAdaptive' - :type mode: str or ~azure.mgmt.media.models.DeinterlaceMode - """ - - _attribute_map = { - 'parity': {'key': 'parity', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(Deinterlace, self).__init__(**kwargs) - self.parity = kwargs.get('parity', None) - self.mode = kwargs.get('mode', None) - - class EdgePolicies(Model): """EdgePolicies. @@ -2393,102 +1795,111 @@ def __init__(self, **kwargs): self.custom_key_acquisition_url_template = kwargs.get('custom_key_acquisition_url_template', None) -class FaceDetectorPreset(Preset): - """Describes all the settings to be used when analyzing a video in order to - detect (and optionally redact) all the faces present. +class ErrorAdditionalInfo(Model): + """The resource management error additional info. - All required parameters must be populated in order to send to Azure. + Variables are only populated by the server, and will be ignored when + sending a request. - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param resolution: Specifies the maximum resolution at which your video is - analyzed. The default behavior is "SourceResolution," which will keep the - input video at its original resolution when analyzed. Using - "StandardDefinition" will resize input videos to standard definition while - preserving the appropriate aspect ratio. It will only resize if the video - is of higher resolution. For example, a 1920x1080 input would be scaled to - 640x360 before processing. Switching to "StandardDefinition" will reduce - the time it takes to process high resolution video. It may also reduce the - cost of using this component (see - https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics - for details). However, faces that end up being too small in the resized - video may not be detected. Possible values include: 'SourceResolution', - 'StandardDefinition' - :type resolution: str or ~azure.mgmt.media.models.AnalysisResolution - :param mode: This mode provides the ability to choose between the - following settings: 1) Analyze - For detection only.This mode generates a - metadata JSON file marking appearances of faces throughout the video.Where - possible, appearances of the same person are assigned the same ID. 2) - Combined - Additionally redacts(blurs) detected faces. 3) Redact - This - enables a 2-pass process, allowing for selective redaction of a subset of - detected faces.It takes in the metadata file from a prior analyze pass, - along with the source video, and a user-selected subset of IDs that - require redaction. 
Possible values include: 'Analyze', 'Redact', - 'Combined' - :type mode: str or ~azure.mgmt.media.models.FaceRedactorMode - :param blur_type: Blur type. Possible values include: 'Box', 'Low', 'Med', - 'High', 'Black' - :type blur_type: str or ~azure.mgmt.media.models.BlurType - :param experimental_options: Dictionary containing key value pairs for - parameters not exposed in the preset itself - :type experimental_options: dict[str, str] + :ivar type: The additional info type. + :vartype type: str + :ivar info: The additional info. + :vartype info: object """ _validation = { - 'odatatype': {'required': True}, + 'type': {'readonly': True}, + 'info': {'readonly': True}, } _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - 'blur_type': {'key': 'blurType', 'type': 'str'}, - 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, + 'type': {'key': 'type', 'type': 'str'}, + 'info': {'key': 'info', 'type': 'object'}, } def __init__(self, **kwargs): - super(FaceDetectorPreset, self).__init__(**kwargs) - self.resolution = kwargs.get('resolution', None) - self.mode = kwargs.get('mode', None) - self.blur_type = kwargs.get('blur_type', None) - self.experimental_options = kwargs.get('experimental_options', None) - self.odatatype = '#Microsoft.Media.FaceDetectorPreset' - - -class Filters(Model): - """Describes all the filtering operations, such as de-interlacing, rotation - etc. that are to be applied to the input media before encoding. - - :param deinterlace: The de-interlacing settings. - :type deinterlace: ~azure.mgmt.media.models.Deinterlace - :param rotation: The rotation, if any, to be applied to the input video, - before it is encoded. Default is Auto. Possible values include: 'Auto', - 'None', 'Rotate0', 'Rotate90', 'Rotate180', 'Rotate270' - :type rotation: str or ~azure.mgmt.media.models.Rotation - :param crop: The parameters for the rectangular window with which to crop - the input video. - :type crop: ~azure.mgmt.media.models.Rectangle - :param overlays: The properties of overlays to be applied to the input - video. These could be audio, image or video overlays. - :type overlays: list[~azure.mgmt.media.models.Overlay] + super(ErrorAdditionalInfo, self).__init__(**kwargs) + self.type = None + self.info = None + + +class ErrorDetail(Model): + """The error detail. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. + :vartype details: list[~azure.mgmt.media.models.ErrorDetail] + :ivar additional_info: The error additional info. 
+ :vartype additional_info: + list[~azure.mgmt.media.models.ErrorAdditionalInfo] """ + _validation = { + 'code': {'readonly': True}, + 'message': {'readonly': True}, + 'target': {'readonly': True}, + 'details': {'readonly': True}, + 'additional_info': {'readonly': True}, + } + _attribute_map = { - 'deinterlace': {'key': 'deinterlace', 'type': 'Deinterlace'}, - 'rotation': {'key': 'rotation', 'type': 'str'}, - 'crop': {'key': 'crop', 'type': 'Rectangle'}, - 'overlays': {'key': 'overlays', 'type': '[Overlay]'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[ErrorDetail]'}, + 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__(self, **kwargs): - super(Filters, self).__init__(**kwargs) - self.deinterlace = kwargs.get('deinterlace', None) - self.rotation = kwargs.get('rotation', None) - self.crop = kwargs.get('crop', None) - self.overlays = kwargs.get('overlays', None) + super(ErrorDetail, self).__init__(**kwargs) + self.code = None + self.message = None + self.target = None + self.details = None + self.additional_info = None -class FilterTrackPropertyCondition(Model): +class ErrorResponse(Model): + """Error response. + + Common error response for all Azure Resource Manager APIs to return error + details for failed operations. (This also follows the OData error response + format.). + + :param error: The error object. + :type error: ~azure.mgmt.media.models.ErrorDetail + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'ErrorDetail'}, + } + + def __init__(self, **kwargs): + super(ErrorResponse, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class ErrorResponseException(HttpOperationError): + """Server responsed with exception of type: 'ErrorResponse'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args) + + +class FilterTrackPropertyCondition(Model): """The class to specify one track property condition. All required parameters must be populated in order to send to Azure. @@ -2538,1744 +1949,94 @@ class FilterTrackSelection(Model): 'track_selections': {'required': True}, } - _attribute_map = { - 'track_selections': {'key': 'trackSelections', 'type': '[FilterTrackPropertyCondition]'}, - } - - def __init__(self, **kwargs): - super(FilterTrackSelection, self).__init__(**kwargs) - self.track_selections = kwargs.get('track_selections', None) - - -class FirstQuality(Model): - """Filter First Quality. - - All required parameters must be populated in order to send to Azure. - - :param bitrate: Required. The first quality bitrate. - :type bitrate: int - """ - - _validation = { - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(FirstQuality, self).__init__(**kwargs) - self.bitrate = kwargs.get('bitrate', None) - - -class Format(Model): - """Base class for output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ImageFormat, MultiBitrateFormat - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. 
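
With ApiError/ApiErrorException removed in favour of the common ErrorResponse/ErrorDetail types added above, operation failures surface as ErrorResponseException. A hedged handling sketch: the `exc.error.error` attribute path follows the usual msrest convention for generated exceptions and is an assumption, as is the pre-built `client`.

    from azure.mgmt.media.models import ErrorResponseException

    # 'client' is assumed to be an existing AzureMediaServices instance.
    try:
        client.mediaservices.get("my-resource-group", "missing-account")
    except ErrorResponseException as exc:
        detail = exc.error.error  # ErrorResponse.error -> ErrorDetail (assumed path)
        print(detail.code, detail.message)
        for nested in detail.details or []:
            print("  nested:", nested.code, nested.message)
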
The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'} - } - - def __init__(self, **kwargs): - super(Format, self).__init__(**kwargs) - self.filename_pattern = kwargs.get('filename_pattern', None) - self.odatatype = None - - -class InputDefinition(Model): - """Base class for defining an input. Use sub classes of this class to specify - tracks selections and related metadata. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FromAllInputFile, FromEachInputFile, InputFile - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.FromAllInputFile': 'FromAllInputFile', '#Microsoft.Media.FromEachInputFile': 'FromEachInputFile', '#Microsoft.Media.InputFile': 'InputFile'} - } - - def __init__(self, **kwargs): - super(InputDefinition, self).__init__(**kwargs) - self.included_tracks = kwargs.get('included_tracks', None) - self.odatatype = None - - -class FromAllInputFile(InputDefinition): - """An InputDefinition that looks across all of the files provided to select - tracks specified by the IncludedTracks property. Generally used with the - AudioTrackByAttribute and VideoTrackByAttribute to allow selection of a - single track across a set of input files. - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. 
- :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(FromAllInputFile, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.FromAllInputFile' - - -class FromEachInputFile(InputDefinition): - """An InputDefinition that looks at each input file provided to select tracks - specified by the IncludedTracks property. Generally used with the - AudioTrackByAttribute and VideoTrackByAttribute to select tracks from each - file given. - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(FromEachInputFile, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.FromEachInputFile' - - -class Layer(Model): - """The encoder can be configured to produce video and/or images (thumbnails) - at different resolutions, by specifying a layer for each desired - resolution. A layer represents the properties for the video or image at a - resolution. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H265VideoLayer, VideoLayer, JpgLayer, PngLayer - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H265VideoLayer': 'H265VideoLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer', '#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer'} - } - - def __init__(self, **kwargs): - super(Layer, self).__init__(**kwargs) - self.width = kwargs.get('width', None) - self.height = kwargs.get('height', None) - self.label = kwargs.get('label', None) - self.odatatype = None - - -class VideoLayer(Layer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer. - - You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: H264Layer - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. This is a required - field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. If it is not specified, the encoder - will use the same frame rate as the input video. - :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. - :type slices: int - :param adaptive_bframe: Whether or not adaptive B-frames are to be used - when encoding this layer. If not specified, the encoder will turn it on - whenever the video profile permits its use. 
- :type adaptive_bframe: bool - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H264Layer': 'H264Layer'} - } - - def __init__(self, **kwargs): - super(VideoLayer, self).__init__(**kwargs) - self.bitrate = kwargs.get('bitrate', None) - self.max_bitrate = kwargs.get('max_bitrate', None) - self.b_frames = kwargs.get('b_frames', None) - self.frame_rate = kwargs.get('frame_rate', None) - self.slices = kwargs.get('slices', None) - self.adaptive_bframe = kwargs.get('adaptive_bframe', None) - self.odatatype = '#Microsoft.Media.VideoLayer' - - -class H264Layer(VideoLayer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer with the H.264 video codec. - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. This is a required - field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. If it is not specified, the encoder - will use the same frame rate as the input video. - :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. - :type slices: int - :param adaptive_bframe: Whether or not adaptive B-frames are to be used - when encoding this layer. 
If not specified, the encoder will turn it on - whenever the video profile permits its use. - :type adaptive_bframe: bool - :param profile: We currently support Baseline, Main, High, High422, - High444. Default is Auto. Possible values include: 'Auto', 'Baseline', - 'Main', 'High', 'High422', 'High444' - :type profile: str or ~azure.mgmt.media.models.H264VideoProfile - :param level: We currently support Level up to 6.2. The value can be Auto, - or a number that matches the H.264 profile. If not specified, the default - is Auto, which lets the encoder choose the Level that is appropriate for - this layer. - :type level: str - :param buffer_window: The VBV buffer window length. The value should be in - ISO 8601 format. The value should be in the range [0.1-100] seconds. The - default is 5 seconds (for example, PT5S). - :type buffer_window: timedelta - :param reference_frames: The number of reference frames to be used when - encoding this layer. If not specified, the encoder determines an - appropriate number based on the encoder complexity setting. - :type reference_frames: int - :param entropy_mode: The entropy mode to be used for this layer. If not - specified, the encoder chooses the mode that is appropriate for the - profile and level. Possible values include: 'Cabac', 'Cavlc' - :type entropy_mode: str or ~azure.mgmt.media.models.EntropyMode - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - 'profile': {'key': 'profile', 'type': 'str'}, - 'level': {'key': 'level', 'type': 'str'}, - 'buffer_window': {'key': 'bufferWindow', 'type': 'duration'}, - 'reference_frames': {'key': 'referenceFrames', 'type': 'int'}, - 'entropy_mode': {'key': 'entropyMode', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(H264Layer, self).__init__(**kwargs) - self.profile = kwargs.get('profile', None) - self.level = kwargs.get('level', None) - self.buffer_window = kwargs.get('buffer_window', None) - self.reference_frames = kwargs.get('reference_frames', None) - self.entropy_mode = kwargs.get('entropy_mode', None) - self.odatatype = '#Microsoft.Media.H264Layer' - - -class Video(Codec): - """Describes the basic properties for encoding the input video. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H265Video, Image, H264Video - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. 
- :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H265Video': 'H265Video', '#Microsoft.Media.Image': 'Image', '#Microsoft.Media.H264Video': 'H264Video'} - } - - def __init__(self, **kwargs): - super(Video, self).__init__(**kwargs) - self.key_frame_interval = kwargs.get('key_frame_interval', None) - self.stretch_mode = kwargs.get('stretch_mode', None) - self.sync_mode = kwargs.get('sync_mode', None) - self.odatatype = '#Microsoft.Media.Video' - - -class H264Video(Video): - """Describes all the properties for encoding a video with the H.264 codec. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param scene_change_detection: Whether or not the encoder should insert - key frames at scene changes. If not specified, the default is false. This - flag should be set to true only when the encoder is being configured to - produce a single output video. - :type scene_change_detection: bool - :param complexity: Tells the encoder how to choose its encoding settings. - The default value is Balanced. Possible values include: 'Speed', - 'Balanced', 'Quality' - :type complexity: str or ~azure.mgmt.media.models.H264Complexity - :param layers: The collection of output H.264 layers to be produced by the - encoder. 
- :type layers: list[~azure.mgmt.media.models.H264Layer] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'}, - 'complexity': {'key': 'complexity', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[H264Layer]'}, - } - - def __init__(self, **kwargs): - super(H264Video, self).__init__(**kwargs) - self.scene_change_detection = kwargs.get('scene_change_detection', None) - self.complexity = kwargs.get('complexity', None) - self.layers = kwargs.get('layers', None) - self.odatatype = '#Microsoft.Media.H264Video' - - -class H265VideoLayer(Layer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H265Layer - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. For example: a - target bitrate of 3000Kbps or 3Mbps means this value should be 3000000 - This is a required field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. If it is not specified, the encoder - will use the same frame rate as the input video. - :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. - :type slices: int - :param adaptive_bframe: Specifies whether or not adaptive B-frames are to - be used when encoding this layer. If not specified, the encoder will turn - it on whenever the video profile permits its use. 
- :type adaptive_bframe: bool - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H265Layer': 'H265Layer'} - } - - def __init__(self, **kwargs): - super(H265VideoLayer, self).__init__(**kwargs) - self.bitrate = kwargs.get('bitrate', None) - self.max_bitrate = kwargs.get('max_bitrate', None) - self.b_frames = kwargs.get('b_frames', None) - self.frame_rate = kwargs.get('frame_rate', None) - self.slices = kwargs.get('slices', None) - self.adaptive_bframe = kwargs.get('adaptive_bframe', None) - self.odatatype = '#Microsoft.Media.H265VideoLayer' - - -class H265Layer(H265VideoLayer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer with the H.265 video codec. - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. For example: a - target bitrate of 3000Kbps or 3Mbps means this value should be 3000000 - This is a required field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. If it is not specified, the encoder - will use the same frame rate as the input video. - :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. 
- :type slices: int - :param adaptive_bframe: Specifies whether or not adaptive B-frames are to - be used when encoding this layer. If not specified, the encoder will turn - it on whenever the video profile permits its use. - :type adaptive_bframe: bool - :param profile: We currently support Main. Default is Auto. Possible - values include: 'Auto', 'Main' - :type profile: str or ~azure.mgmt.media.models.H265VideoProfile - :param level: We currently support Level up to 6.2. The value can be Auto, - or a number that matches the H.265 profile. If not specified, the default - is Auto, which lets the encoder choose the Level that is appropriate for - this layer. - :type level: str - :param buffer_window: The VBV buffer window length. The value should be in - ISO 8601 format. The value should be in the range [0.1-100] seconds. The - default is 5 seconds (for example, PT5S). - :type buffer_window: timedelta - :param reference_frames: The number of reference frames to be used when - encoding this layer. If not specified, the encoder determines an - appropriate number based on the encoder complexity setting. - :type reference_frames: int - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - 'profile': {'key': 'profile', 'type': 'str'}, - 'level': {'key': 'level', 'type': 'str'}, - 'buffer_window': {'key': 'bufferWindow', 'type': 'duration'}, - 'reference_frames': {'key': 'referenceFrames', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(H265Layer, self).__init__(**kwargs) - self.profile = kwargs.get('profile', None) - self.level = kwargs.get('level', None) - self.buffer_window = kwargs.get('buffer_window', None) - self.reference_frames = kwargs.get('reference_frames', None) - self.odatatype = '#Microsoft.Media.H265Layer' - - -class H265Video(Video): - """Describes all the properties for encoding a video with the H.265 codec. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. 
Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param scene_change_detection: Specifies whether or not the encoder should - insert key frames at scene changes. If not specified, the default is - false. This flag should be set to true only when the encoder is being - configured to produce a single output video. - :type scene_change_detection: bool - :param complexity: Tells the encoder how to choose its encoding settings. - Quality will provide for a higher compression ratio but at a higher cost - and longer compute time. Speed will produce a relatively larger file but - is faster and more economical. The default value is Balanced. Possible - values include: 'Speed', 'Balanced', 'Quality' - :type complexity: str or ~azure.mgmt.media.models.H265Complexity - :param layers: The collection of output H.265 layers to be produced by the - encoder. - :type layers: list[~azure.mgmt.media.models.H265Layer] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'}, - 'complexity': {'key': 'complexity', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[H265Layer]'}, - } - - def __init__(self, **kwargs): - super(H265Video, self).__init__(**kwargs) - self.scene_change_detection = kwargs.get('scene_change_detection', None) - self.complexity = kwargs.get('complexity', None) - self.layers = kwargs.get('layers', None) - self.odatatype = '#Microsoft.Media.H265Video' - - -class Hls(Model): - """HTTP Live Streaming (HLS) packing setting for the live output. - - :param fragments_per_ts_segment: The number of fragments in an HTTP Live - Streaming (HLS) TS segment in the output of the live event. This value - does not affect the packing ratio for HLS CMAF output. - :type fragments_per_ts_segment: int - """ - - _attribute_map = { - 'fragments_per_ts_segment': {'key': 'fragmentsPerTsSegment', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(Hls, self).__init__(**kwargs) - self.fragments_per_ts_segment = kwargs.get('fragments_per_ts_segment', None) - - -class Image(Video): - """Describes the basic properties for generating thumbnails from the input - video. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JpgImage, PngImage - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. 
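# Illustrative sketch only: how the kwargs-based H.265 models documented in the
# removal hunks above were instantiated in the prior (2021-05-01) client surface.
# The bitrate and buffer-window figures are example values taken from the docstrings.
from datetime import timedelta
from azure.mgmt.media.models import H265Layer, H265Video

layer = H265Layer(
    bitrate=3000000,                       # required: 3 Mbps expressed in bits per second
    max_bitrate=4500000,                   # VBV refill rate; defaults to 'bitrate' when omitted
    buffer_window=timedelta(seconds=5),    # 'duration' field, serialized as ISO 8601 PT5S
    profile='Main',
)
codec = H265Video(
    key_frame_interval=timedelta(seconds=2),  # the documented PT2S default, made explicit
    complexity='Balanced',
    layers=[layer],
)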
- Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param start: Required. The position in the input video from where to - start generating thumbnails. The value can be in ISO 8601 format (For - example, PT05S to start at 5 seconds), or a frame count (For example, 10 - to start at the 10th frame), or a relative value to stream duration (For - example, 10% to start at 10% of stream duration). Also supports a macro - {Best}, which tells the encoder to select the best thumbnail from the - first few seconds of the video and will only produce one thumbnail, no - matter what other settings are for Step and Range. The default value is - macro {Best}. - :type start: str - :param step: The intervals at which thumbnails are generated. The value - can be in ISO 8601 format (For example, PT05S for one image every 5 - seconds), or a frame count (For example, 30 for one image every 30 - frames), or a relative value to stream duration (For example, 10% for one - image every 10% of stream duration). Note: Step value will affect the - first generated thumbnail, which may not be exactly the one specified at - transform preset start time. This is due to the encoder, which tries to - select the best thumbnail between start time and Step position from start - time as the first output. As the default value is 10%, it means if stream - has long duration, the first generated thumbnail might be far away from - the one specified at start time. Try to select reasonable value for Step - if the first thumbnail is expected close to start time, or set Range value - at 1 if only one thumbnail is needed at start time. - :type step: str - :param range: The position relative to transform preset start time in the - input video at which to stop generating thumbnails. The value can be in - ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds - from start time), or a frame count (For example, 300 to stop at the 300th - frame from the frame at start time. If this value is 1, it means only - producing one thumbnail at start time), or a relative value to the stream - duration (For example, 50% to stop at half of stream duration from start - time). The default value is 100%, which means to stop at the end of the - stream. - :type range: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'start': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'str'}, - 'step': {'key': 'step', 'type': 'str'}, - 'range': {'key': 'range', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JpgImage': 'JpgImage', '#Microsoft.Media.PngImage': 'PngImage'} - } - - def __init__(self, **kwargs): - super(Image, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.step = kwargs.get('step', None) - self.range = kwargs.get('range', None) - self.odatatype = '#Microsoft.Media.Image' - - -class ImageFormat(Format): - """Describes the properties for an output image file. 
- - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JpgFormat, PngFormat - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.PngFormat': 'PngFormat'} - } - - def __init__(self, **kwargs): - super(ImageFormat, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.ImageFormat' - - -class InputFile(InputDefinition): - """An InputDefinition for a single file. TrackSelections are scoped to the - file specified. - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param filename: Name of the file that this input definition applies to. - :type filename: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'filename': {'key': 'filename', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(InputFile, self).__init__(**kwargs) - self.filename = kwargs.get('filename', None) - self.odatatype = '#Microsoft.Media.InputFile' - - -class IPAccessControl(Model): - """The IP access control. - - :param allow: The IP allow list. - :type allow: list[~azure.mgmt.media.models.IPRange] - """ - - _attribute_map = { - 'allow': {'key': 'allow', 'type': '[IPRange]'}, - } - - def __init__(self, **kwargs): - super(IPAccessControl, self).__init__(**kwargs) - self.allow = kwargs.get('allow', None) - - -class IPRange(Model): - """The IP address range in the CIDR scheme. - - :param name: The friendly name for the IP address range. - :type name: str - :param address: The IP address. - :type address: str - :param subnet_prefix_length: The subnet mask prefix length (see CIDR - notation). 
- :type subnet_prefix_length: int - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'address': {'key': 'address', 'type': 'str'}, - 'subnet_prefix_length': {'key': 'subnetPrefixLength', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(IPRange, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.address = kwargs.get('address', None) - self.subnet_prefix_length = kwargs.get('subnet_prefix_length', None) - - -class Job(ProxyResource): - """A Job resource type. The progress and state can be obtained by polling a - Job or subscribing to events using EventGrid. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - :vartype id: str - :ivar name: The name of the resource - :vartype name: str - :ivar type: The type of the resource. E.g. - "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - :vartype type: str - :ivar created: The UTC date and time when the customer has created the - Job, in 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype created: datetime - :ivar state: The current state of the job. Possible values include: - 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', 'Queued', - 'Scheduled' - :vartype state: str or ~azure.mgmt.media.models.JobState - :param description: Optional customer supplied description of the Job. - :type description: str - :param input: Required. The inputs for the Job. - :type input: ~azure.mgmt.media.models.JobInput - :ivar last_modified: The UTC date and time when the customer has last - updated the Job, in 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype last_modified: datetime - :param outputs: Required. The outputs for the Job. - :type outputs: list[~azure.mgmt.media.models.JobOutput] - :param priority: Priority with which the job should be processed. Higher - priority jobs are processed before lower priority jobs. If not set, the - default is normal. Possible values include: 'Low', 'Normal', 'High' - :type priority: str or ~azure.mgmt.media.models.Priority - :param correlation_data: Customer provided key, value pairs that will be - returned in Job and JobOutput state events. - :type correlation_data: dict[str, str] - :ivar start_time: The UTC date and time at which this Job began - processing. - :vartype start_time: datetime - :ivar end_time: The UTC date and time at which this Job finished - processing. - :vartype end_time: datetime - :ivar system_data: The system metadata relating to this resource. 
- :vartype system_data: ~azure.mgmt.media.models.SystemData - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'created': {'readonly': True}, - 'state': {'readonly': True}, - 'input': {'required': True}, - 'last_modified': {'readonly': True}, - 'outputs': {'required': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'system_data': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'created': {'key': 'properties.created', 'type': 'iso-8601'}, - 'state': {'key': 'properties.state', 'type': 'str'}, - 'description': {'key': 'properties.description', 'type': 'str'}, - 'input': {'key': 'properties.input', 'type': 'JobInput'}, - 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, - 'outputs': {'key': 'properties.outputs', 'type': '[JobOutput]'}, - 'priority': {'key': 'properties.priority', 'type': 'str'}, - 'correlation_data': {'key': 'properties.correlationData', 'type': '{str}'}, - 'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'}, - 'system_data': {'key': 'systemData', 'type': 'SystemData'}, - } - - def __init__(self, **kwargs): - super(Job, self).__init__(**kwargs) - self.created = None - self.state = None - self.description = kwargs.get('description', None) - self.input = kwargs.get('input', None) - self.last_modified = None - self.outputs = kwargs.get('outputs', None) - self.priority = kwargs.get('priority', None) - self.correlation_data = kwargs.get('correlation_data', None) - self.start_time = None - self.end_time = None - self.system_data = None - - -class JobError(Model): - """Details of JobOutput errors. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar code: Error code describing the error. Possible values include: - 'ServiceError', 'ServiceTransientError', 'DownloadNotAccessible', - 'DownloadTransientError', 'UploadNotAccessible', 'UploadTransientError', - 'ConfigurationUnsupported', 'ContentMalformed', 'ContentUnsupported' - :vartype code: str or ~azure.mgmt.media.models.JobErrorCode - :ivar message: A human-readable language-dependent representation of the - error. - :vartype message: str - :ivar category: Helps with categorization of errors. Possible values - include: 'Service', 'Download', 'Upload', 'Configuration', 'Content' - :vartype category: str or ~azure.mgmt.media.models.JobErrorCategory - :ivar retry: Indicates that it may be possible to retry the Job. If retry - is unsuccessful, please contact Azure support via Azure Portal. Possible - values include: 'DoNotRetry', 'MayRetry' - :vartype retry: str or ~azure.mgmt.media.models.JobRetry - :ivar details: An array of details about specific errors that led to this - reported error. 
- :vartype details: list[~azure.mgmt.media.models.JobErrorDetail] - """ - - _validation = { - 'code': {'readonly': True}, - 'message': {'readonly': True}, - 'category': {'readonly': True}, - 'retry': {'readonly': True}, - 'details': {'readonly': True}, - } - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'category': {'key': 'category', 'type': 'str'}, - 'retry': {'key': 'retry', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[JobErrorDetail]'}, - } - - def __init__(self, **kwargs): - super(JobError, self).__init__(**kwargs) - self.code = None - self.message = None - self.category = None - self.retry = None - self.details = None - - -class JobErrorDetail(Model): - """Details of JobOutput errors. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar code: Code describing the error detail. - :vartype code: str - :ivar message: A human-readable representation of the error. - :vartype message: str - """ - - _validation = { - 'code': {'readonly': True}, - 'message': {'readonly': True}, - } - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobErrorDetail, self).__init__(**kwargs) - self.code = None - self.message = None - - -class JobInput(Model): - """Base class for inputs to a Job. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobInputClip, JobInputs, JobInputSequence - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputs': 'JobInputs', '#Microsoft.Media.JobInputSequence': 'JobInputSequence'} - } - - def __init__(self, **kwargs): - super(JobInput, self).__init__(**kwargs) - self.odatatype = None - - -class JobInputClip(JobInput): - """Represents input files for a Job. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobInputAsset, JobInputHttp - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param files: List of files. Required for JobInputHttp. Maximum of 4000 - characters each. - :type files: list[str] - :param start: Defines a point on the timeline of the input media at which - processing will start. Defaults to the beginning of the input media. - :type start: ~azure.mgmt.media.models.ClipTime - :param end: Defines a point on the timeline of the input media at which - processing will end. Defaults to the end of the input media. - :type end: ~azure.mgmt.media.models.ClipTime - :param label: A label that is assigned to a JobInputClip, that is used to - satisfy a reference used in the Transform. For example, a Transform can be - authored so as to take an image file with the label 'xyz' and apply it as - an overlay onto the input video before it is encoded. When submitting a - Job, exactly one of the JobInputs should be the image file, and it should - have the label 'xyz'. - :type label: str - :param input_definitions: Defines a list of InputDefinitions. 
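# Illustrative sketch: inspecting the read-only JobError fields documented above on a
# JobOutput. 'job_output' is a hypothetical JobOutput instance returned by the service;
# none of these properties are settable by the caller.
def should_resubmit(job_output):
    error = job_output.error
    if error is None:
        return False
    for detail in error.details or []:
        print(f"{detail.code}: {detail.message}")
    # 'MayRetry' vs 'DoNotRetry' is the JobRetry value described in the docstring.
    return error.retry == 'MayRetry'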
For each - InputDefinition, it defines a list of track selections and related - metadata. - :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'files': {'key': 'files', 'type': '[str]'}, - 'start': {'key': 'start', 'type': 'ClipTime'}, - 'end': {'key': 'end', 'type': 'ClipTime'}, - 'label': {'key': 'label', 'type': 'str'}, - 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JobInputAsset': 'JobInputAsset', '#Microsoft.Media.JobInputHttp': 'JobInputHttp'} - } - - def __init__(self, **kwargs): - super(JobInputClip, self).__init__(**kwargs) - self.files = kwargs.get('files', None) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - self.label = kwargs.get('label', None) - self.input_definitions = kwargs.get('input_definitions', None) - self.odatatype = '#Microsoft.Media.JobInputClip' - - -class JobInputAsset(JobInputClip): - """Represents an Asset for input into a Job. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param files: List of files. Required for JobInputHttp. Maximum of 4000 - characters each. - :type files: list[str] - :param start: Defines a point on the timeline of the input media at which - processing will start. Defaults to the beginning of the input media. - :type start: ~azure.mgmt.media.models.ClipTime - :param end: Defines a point on the timeline of the input media at which - processing will end. Defaults to the end of the input media. - :type end: ~azure.mgmt.media.models.ClipTime - :param label: A label that is assigned to a JobInputClip, that is used to - satisfy a reference used in the Transform. For example, a Transform can be - authored so as to take an image file with the label 'xyz' and apply it as - an overlay onto the input video before it is encoded. When submitting a - Job, exactly one of the JobInputs should be the image file, and it should - have the label 'xyz'. - :type label: str - :param input_definitions: Defines a list of InputDefinitions. For each - InputDefinition, it defines a list of track selections and related - metadata. - :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] - :param asset_name: Required. The name of the input Asset. - :type asset_name: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'asset_name': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'files': {'key': 'files', 'type': '[str]'}, - 'start': {'key': 'start', 'type': 'ClipTime'}, - 'end': {'key': 'end', 'type': 'ClipTime'}, - 'label': {'key': 'label', 'type': 'str'}, - 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, - 'asset_name': {'key': 'assetName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobInputAsset, self).__init__(**kwargs) - self.asset_name = kwargs.get('asset_name', None) - self.odatatype = '#Microsoft.Media.JobInputAsset' - - -class JobInputHttp(JobInputClip): - """Represents HTTPS job input. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param files: List of files. Required for JobInputHttp. 
Maximum of 4000 - characters each. - :type files: list[str] - :param start: Defines a point on the timeline of the input media at which - processing will start. Defaults to the beginning of the input media. - :type start: ~azure.mgmt.media.models.ClipTime - :param end: Defines a point on the timeline of the input media at which - processing will end. Defaults to the end of the input media. - :type end: ~azure.mgmt.media.models.ClipTime - :param label: A label that is assigned to a JobInputClip, that is used to - satisfy a reference used in the Transform. For example, a Transform can be - authored so as to take an image file with the label 'xyz' and apply it as - an overlay onto the input video before it is encoded. When submitting a - Job, exactly one of the JobInputs should be the image file, and it should - have the label 'xyz'. - :type label: str - :param input_definitions: Defines a list of InputDefinitions. For each - InputDefinition, it defines a list of track selections and related - metadata. - :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] - :param base_uri: Base URI for HTTPS job input. It will be concatenated - with provided file names. If no base uri is given, then the provided file - list is assumed to be fully qualified uris. Maximum length of 4000 - characters. - :type base_uri: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'files': {'key': 'files', 'type': '[str]'}, - 'start': {'key': 'start', 'type': 'ClipTime'}, - 'end': {'key': 'end', 'type': 'ClipTime'}, - 'label': {'key': 'label', 'type': 'str'}, - 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, - 'base_uri': {'key': 'baseUri', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobInputHttp, self).__init__(**kwargs) - self.base_uri = kwargs.get('base_uri', None) - self.odatatype = '#Microsoft.Media.JobInputHttp' - - -class JobInputs(JobInput): - """Describes a list of inputs to a Job. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param inputs: List of inputs to a Job. - :type inputs: list[~azure.mgmt.media.models.JobInput] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[JobInput]'}, - } - - def __init__(self, **kwargs): - super(JobInputs, self).__init__(**kwargs) - self.inputs = kwargs.get('inputs', None) - self.odatatype = '#Microsoft.Media.JobInputs' - - -class JobInputSequence(JobInput): - """A Sequence contains an ordered list of Clips where each clip is a JobInput. - The Sequence will be treated as a single input. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param inputs: JobInputs that make up the timeline. 
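# Illustrative sketch of the job-input models in the hunks above: an HTTPS input whose
# file names are resolved against base_uri, wrapped in a JobInputs list. The URL, file
# name and label are hypothetical placeholder values.
from azure.mgmt.media.models import JobInputHttp, JobInputs

http_input = JobInputHttp(
    base_uri='https://example.blob.core.windows.net/inputs/',  # concatenated with each file name
    files=['video.mp4'],
    label='mainVideo',
)
job_input = JobInputs(inputs=[http_input])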
- :type inputs: list[~azure.mgmt.media.models.JobInputClip] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[JobInputClip]'}, - } - - def __init__(self, **kwargs): - super(JobInputSequence, self).__init__(**kwargs) - self.inputs = kwargs.get('inputs', None) - self.odatatype = '#Microsoft.Media.JobInputSequence' - - -class JobOutput(Model): - """Describes all the properties of a JobOutput. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobOutputAsset - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar error: If the JobOutput is in the Error state, it contains the - details of the error. - :vartype error: ~azure.mgmt.media.models.JobError - :ivar state: Describes the state of the JobOutput. Possible values - include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', - 'Queued', 'Scheduled' - :vartype state: str or ~azure.mgmt.media.models.JobState - :ivar progress: If the JobOutput is in a Processing state, this contains - the Job completion percentage. The value is an estimate and not intended - to be used to predict Job completion times. To determine if the JobOutput - is complete, use the State property. - :vartype progress: int - :param label: A label that is assigned to a JobOutput in order to help - uniquely identify it. This is useful when your Transform has more than one - TransformOutput, whereby your Job has more than one JobOutput. In such - cases, when you submit the Job, you will add two or more JobOutputs, in - the same order as TransformOutputs in the Transform. Subsequently, when - you retrieve the Job, either through events or on a GET request, you can - use the label to easily identify the JobOutput. If a label is not - provided, a default value of '{presetName}_{outputIndex}' will be used, - where the preset name is the name of the preset in the corresponding - TransformOutput and the output index is the relative index of the this - JobOutput within the Job. Note that this index is the same as the relative - index of the corresponding TransformOutput within its Transform. - :type label: str - :ivar start_time: The UTC date and time at which this Job Output began - processing. - :vartype start_time: datetime - :ivar end_time: The UTC date and time at which this Job Output finished - processing. - :vartype end_time: datetime - :param odatatype: Required. Constant filled by server. 
- :type odatatype: str - """ - - _validation = { - 'error': {'readonly': True}, - 'state': {'readonly': True}, - 'progress': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'error': {'key': 'error', 'type': 'JobError'}, - 'state': {'key': 'state', 'type': 'str'}, - 'progress': {'key': 'progress', 'type': 'int'}, - 'label': {'key': 'label', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JobOutputAsset': 'JobOutputAsset'} - } - - def __init__(self, **kwargs): - super(JobOutput, self).__init__(**kwargs) - self.error = None - self.state = None - self.progress = None - self.label = kwargs.get('label', None) - self.start_time = None - self.end_time = None - self.odatatype = None - - -class JobOutputAsset(JobOutput): - """Represents an Asset used as a JobOutput. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar error: If the JobOutput is in the Error state, it contains the - details of the error. - :vartype error: ~azure.mgmt.media.models.JobError - :ivar state: Describes the state of the JobOutput. Possible values - include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', - 'Queued', 'Scheduled' - :vartype state: str or ~azure.mgmt.media.models.JobState - :ivar progress: If the JobOutput is in a Processing state, this contains - the Job completion percentage. The value is an estimate and not intended - to be used to predict Job completion times. To determine if the JobOutput - is complete, use the State property. - :vartype progress: int - :param label: A label that is assigned to a JobOutput in order to help - uniquely identify it. This is useful when your Transform has more than one - TransformOutput, whereby your Job has more than one JobOutput. In such - cases, when you submit the Job, you will add two or more JobOutputs, in - the same order as TransformOutputs in the Transform. Subsequently, when - you retrieve the Job, either through events or on a GET request, you can - use the label to easily identify the JobOutput. If a label is not - provided, a default value of '{presetName}_{outputIndex}' will be used, - where the preset name is the name of the preset in the corresponding - TransformOutput and the output index is the relative index of the this - JobOutput within the Job. Note that this index is the same as the relative - index of the corresponding TransformOutput within its Transform. - :type label: str - :ivar start_time: The UTC date and time at which this Job Output began - processing. - :vartype start_time: datetime - :ivar end_time: The UTC date and time at which this Job Output finished - processing. - :vartype end_time: datetime - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param asset_name: Required. The name of the output Asset. 
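# Illustrative sketch: assembling the writable parts of a Job as documented above.
# Read-only properties (state, created, progress, start/end times) are populated by the
# service. The asset names and correlation data are hypothetical.
from azure.mgmt.media.models import Job, JobInputAsset, JobOutputAsset

job = Job(
    input=JobInputAsset(asset_name='input-asset'),         # required
    outputs=[JobOutputAsset(asset_name='output-asset')],   # required; order mirrors the TransformOutputs
    priority='Normal',
    correlation_data={'batch': 'nightly-encode'},          # returned in Job and JobOutput state events
)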
- :type asset_name: str - """ - - _validation = { - 'error': {'readonly': True}, - 'state': {'readonly': True}, - 'progress': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'odatatype': {'required': True}, - 'asset_name': {'required': True}, - } - - _attribute_map = { - 'error': {'key': 'error', 'type': 'JobError'}, - 'state': {'key': 'state', 'type': 'str'}, - 'progress': {'key': 'progress', 'type': 'int'}, - 'label': {'key': 'label', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'asset_name': {'key': 'assetName', 'type': 'str'}, - } - + _attribute_map = { + 'track_selections': {'key': 'trackSelections', 'type': '[FilterTrackPropertyCondition]'}, + } + def __init__(self, **kwargs): - super(JobOutputAsset, self).__init__(**kwargs) - self.asset_name = kwargs.get('asset_name', None) - self.odatatype = '#Microsoft.Media.JobOutputAsset' + super(FilterTrackSelection, self).__init__(**kwargs) + self.track_selections = kwargs.get('track_selections', None) -class JpgFormat(ImageFormat): - """Describes the settings for producing JPEG thumbnails. +class FirstQuality(Model): + """Filter First Quality. All required parameters must be populated in order to send to Azure. - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :param bitrate: Required. The first quality bitrate. + :type bitrate: int """ _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, + 'bitrate': {'required': True}, } _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + 'bitrate': {'key': 'bitrate', 'type': 'int'}, } def __init__(self, **kwargs): - super(JpgFormat, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.JpgFormat' - + super(FirstQuality, self).__init__(**kwargs) + self.bitrate = kwargs.get('bitrate', None) -class JpgImage(Image): - """Describes the properties for producing a series of JPEG images from the - input video. - All required parameters must be populated in order to send to Azure. +class Hls(Model): + """HTTP Live Streaming (HLS) packing setting for the live output. - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. 
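# Illustrative sketch for the filter models added above: FirstQuality carries the
# required first-quality bitrate, and FilterTrackSelection wraps a list of
# FilterTrackPropertyCondition objects defined elsewhere in this module.
# 128000 is used purely as an example value.
from azure.mgmt.media.models import FirstQuality

first_quality = FirstQuality(bitrate=128000)  # required integer bitrate
# FilterTrackSelection(track_selections=[...]) would reference one or more
# FilterTrackPropertyCondition instances describing the tracks to select.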
- :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param start: Required. The position in the input video from where to - start generating thumbnails. The value can be in ISO 8601 format (For - example, PT05S to start at 5 seconds), or a frame count (For example, 10 - to start at the 10th frame), or a relative value to stream duration (For - example, 10% to start at 10% of stream duration). Also supports a macro - {Best}, which tells the encoder to select the best thumbnail from the - first few seconds of the video and will only produce one thumbnail, no - matter what other settings are for Step and Range. The default value is - macro {Best}. - :type start: str - :param step: The intervals at which thumbnails are generated. The value - can be in ISO 8601 format (For example, PT05S for one image every 5 - seconds), or a frame count (For example, 30 for one image every 30 - frames), or a relative value to stream duration (For example, 10% for one - image every 10% of stream duration). Note: Step value will affect the - first generated thumbnail, which may not be exactly the one specified at - transform preset start time. This is due to the encoder, which tries to - select the best thumbnail between start time and Step position from start - time as the first output. As the default value is 10%, it means if stream - has long duration, the first generated thumbnail might be far away from - the one specified at start time. Try to select reasonable value for Step - if the first thumbnail is expected close to start time, or set Range value - at 1 if only one thumbnail is needed at start time. - :type step: str - :param range: The position relative to transform preset start time in the - input video at which to stop generating thumbnails. The value can be in - ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds - from start time), or a frame count (For example, 300 to stop at the 300th - frame from the frame at start time. If this value is 1, it means only - producing one thumbnail at start time), or a relative value to the stream - duration (For example, 50% to stop at half of stream duration from start - time). The default value is 100%, which means to stop at the end of the - stream. - :type range: str - :param layers: A collection of output JPEG image layers to be produced by - the encoder. - :type layers: list[~azure.mgmt.media.models.JpgLayer] - :param sprite_column: Sets the number of columns used in thumbnail sprite - image. The number of rows are automatically calculated and a VTT file is - generated with the coordinate mappings for each thumbnail in the sprite. 
- Note: this value should be a positive integer and a proper value is - recommended so that the output image resolution will not go beyond JPEG - maximum pixel resolution limit 65535x65535. - :type sprite_column: int + :param fragments_per_ts_segment: The number of fragments in an HTTP Live + Streaming (HLS) TS segment in the output of the live event. This value + does not affect the packing ratio for HLS CMAF output. + :type fragments_per_ts_segment: int """ - _validation = { - 'odatatype': {'required': True}, - 'start': {'required': True}, - } - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'str'}, - 'step': {'key': 'step', 'type': 'str'}, - 'range': {'key': 'range', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[JpgLayer]'}, - 'sprite_column': {'key': 'spriteColumn', 'type': 'int'}, + 'fragments_per_ts_segment': {'key': 'fragmentsPerTsSegment', 'type': 'int'}, } def __init__(self, **kwargs): - super(JpgImage, self).__init__(**kwargs) - self.layers = kwargs.get('layers', None) - self.sprite_column = kwargs.get('sprite_column', None) - self.odatatype = '#Microsoft.Media.JpgImage' - + super(Hls, self).__init__(**kwargs) + self.fragments_per_ts_segment = kwargs.get('fragments_per_ts_segment', None) -class JpgLayer(Layer): - """Describes the settings to produce a JPEG image from the input video. - All required parameters must be populated in order to send to Azure. +class IPAccessControl(Model): + """The IP access control. - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param quality: The compression quality of the JPEG output. Range is from - 0-100 and the default is 70. - :type quality: int + :param allow: The IP allow list. + :type allow: list[~azure.mgmt.media.models.IPRange] """ - _validation = { - 'odatatype': {'required': True}, + _attribute_map = { + 'allow': {'key': 'allow', 'type': '[IPRange]'}, } + def __init__(self, **kwargs): + super(IPAccessControl, self).__init__(**kwargs) + self.allow = kwargs.get('allow', None) + + +class IPRange(Model): + """The IP address range in the CIDR scheme. + + :param name: The friendly name for the IP address range. + :type name: str + :param address: The IP address. + :type address: str + :param subnet_prefix_length: The subnet mask prefix length (see CIDR + notation). 
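# Illustrative sketch for the re-added Hls model: its only setting is the number of
# fragments packed into each HTTP Live Streaming TS segment of a live output. The
# value 5 is an arbitrary example, not a service default.
from azure.mgmt.media.models import Hls

hls_settings = Hls(fragments_per_ts_segment=5)
# Typically assigned to a live output's HLS packing setting at creation time; per the
# docstring above it does not affect the packing ratio for HLS CMAF output.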
+ :type subnet_prefix_length: int + """ + _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'int'}, + 'name': {'key': 'name', 'type': 'str'}, + 'address': {'key': 'address', 'type': 'str'}, + 'subnet_prefix_length': {'key': 'subnetPrefixLength', 'type': 'int'}, } def __init__(self, **kwargs): - super(JpgLayer, self).__init__(**kwargs) - self.quality = kwargs.get('quality', None) - self.odatatype = '#Microsoft.Media.JpgLayer' + super(IPRange, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.address = kwargs.get('address', None) + self.subnet_prefix_length = kwargs.get('subnet_prefix_length', None) class KeyDelivery(Model): @@ -4626,7 +2387,8 @@ class LiveEventEncoding(Model): encoder transcodes the incoming stream into multiple bitrates or layers. See https://go.microsoft.com/fwlink/?linkid=2095101 for more information. This property cannot be modified after the live event is created. Possible - values include: 'None', 'Standard', 'Premium1080p' + values include: 'None', 'Standard', 'Premium1080p', 'PassthroughBasic', + 'PassthroughStandard' :type encoding_type: str or ~azure.mgmt.media.models.LiveEventEncodingType :param preset_name: The optional encoding preset name, used when encodingType is not None. This value is specified at creation time and @@ -4941,6 +2703,8 @@ class LiveOutput(ProxyResource): values include: 'Creating', 'Running', 'Deleting' :vartype resource_state: str or ~azure.mgmt.media.models.LiveOutputResourceState + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData """ _validation = { @@ -4953,6 +2717,7 @@ class LiveOutput(ProxyResource): 'last_modified': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'resource_state': {'readonly': True}, + 'system_data': {'readonly': True}, } _attribute_map = { @@ -4969,6 +2734,7 @@ class LiveOutput(ProxyResource): 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_state': {'key': 'properties.resourceState', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__(self, **kwargs): @@ -4983,28 +2749,7 @@ def __init__(self, **kwargs): self.last_modified = None self.provisioning_state = None self.resource_state = None - - -class Location(Model): - """Location. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(Location, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.system_data = None class LogSpecification(Model): @@ -5073,6 +2818,11 @@ class MediaService(TrackedResource): :param key_delivery: The Key Delivery properties for Media Services account. :type key_delivery: ~azure.mgmt.media.models.KeyDelivery + :param public_network_access: Whether or not public network access is + allowed for resources under the Media Services account. Possible values + include: 'Enabled', 'Disabled' + :type public_network_access: str or + ~azure.mgmt.media.models.PublicNetworkAccess :param identity: The Managed Identity for the Media Services account. 
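# Illustrative sketch for the IP access-control models re-added above: an allow list
# made of CIDR-style IPRange entries. The range shown (allow everything) is a
# placeholder example, not a recommendation.
from azure.mgmt.media.models import IPAccessControl, IPRange

access_control = IPAccessControl(
    allow=[IPRange(name='AllowAll', address='0.0.0.0', subnet_prefix_length=0)],
)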
:type identity: ~azure.mgmt.media.models.MediaServiceIdentity :ivar system_data: The system metadata relating to this resource. @@ -5099,6 +2849,7 @@ class MediaService(TrackedResource): 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, + 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } @@ -5110,6 +2861,7 @@ def __init__(self, **kwargs): self.storage_authentication = kwargs.get('storage_authentication', None) self.encryption = kwargs.get('encryption', None) self.key_delivery = kwargs.get('key_delivery', None) + self.public_network_access = kwargs.get('public_network_access', None) self.identity = kwargs.get('identity', None) self.system_data = None @@ -5122,13 +2874,15 @@ class MediaServiceIdentity(Model): All required parameters must be populated in order to send to Azure. - :param type: Required. The identity type. Possible values include: - 'SystemAssigned', 'None' - :type type: str or ~azure.mgmt.media.models.ManagedIdentityType + :param type: Required. The identity type. + :type type: str :ivar principal_id: The Principal ID of the identity. :vartype principal_id: str :ivar tenant_id: The Tenant ID of the identity. :vartype tenant_id: str + :param user_assigned_identities: The user assigned managed identities. + :type user_assigned_identities: dict[str, + ~azure.mgmt.media.models.UserAssignedManagedIdentity] """ _validation = { @@ -5141,6 +2895,7 @@ class MediaServiceIdentity(Model): 'type': {'key': 'type', 'type': 'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'}, 'tenant_id': {'key': 'tenantId', 'type': 'str'}, + 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__(self, **kwargs): @@ -5148,6 +2903,7 @@ def __init__(self, **kwargs): self.type = kwargs.get('type', None) self.principal_id = None self.tenant_id = None + self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class MediaServiceUpdate(Model): @@ -5171,6 +2927,11 @@ class MediaServiceUpdate(Model): :param key_delivery: The Key Delivery properties for Media Services account. :type key_delivery: ~azure.mgmt.media.models.KeyDelivery + :param public_network_access: Whether or not public network access is + allowed for resources under the Media Services account. Possible values + include: 'Enabled', 'Disabled' + :type public_network_access: str or + ~azure.mgmt.media.models.PublicNetworkAccess :param identity: The Managed Identity for the Media Services account. 
:type identity: ~azure.mgmt.media.models.MediaServiceIdentity """ @@ -5186,6 +2947,7 @@ class MediaServiceUpdate(Model): 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, + 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, } @@ -5197,6 +2959,7 @@ def __init__(self, **kwargs): self.storage_authentication = kwargs.get('storage_authentication', None) self.encryption = kwargs.get('encryption', None) self.key_delivery = kwargs.get('key_delivery', None) + self.public_network_access = kwargs.get('public_network_access', None) self.identity = kwargs.get('identity', None) @@ -5316,102 +3079,6 @@ def __init__(self, **kwargs): self.supported_time_grain_types = None -class MultiBitrateFormat(Format): - """Describes the properties for producing a collection of GOP aligned - multi-bitrate files. The default behavior is to produce one output file for - each video layer which is muxed together with all the audios. The exact - output files produced can be controlled by specifying the outputFiles - collection. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Mp4Format, TransportStreamFormat - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param output_files: The list of output files to produce. Each entry in - the list is a set of audio and video layer labels to be muxed together . - :type output_files: list[~azure.mgmt.media.models.OutputFile] - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.Mp4Format': 'Mp4Format', '#Microsoft.Media.TransportStreamFormat': 'TransportStreamFormat'} - } - - def __init__(self, **kwargs): - super(MultiBitrateFormat, self).__init__(**kwargs) - self.output_files = kwargs.get('output_files', None) - self.odatatype = '#Microsoft.Media.MultiBitrateFormat' - - -class Mp4Format(MultiBitrateFormat): - """Describes the properties for an output ISO MP4 file. 
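# Illustrative sketch combining the account-level additions above: disabling public
# network access and attaching a user-assigned managed identity on a MediaServiceUpdate
# payload. The identity resource ID is a hypothetical placeholder, and 'UserAssigned'
# is an example value for the now free-form identity type string.
from azure.mgmt.media.models import (
    MediaServiceIdentity,
    MediaServiceUpdate,
    UserAssignedManagedIdentity,
)

identity_id = (
    '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg'
    '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/ams-mi'
)
update = MediaServiceUpdate(
    public_network_access='Disabled',       # 'Enabled' or 'Disabled'
    identity=MediaServiceIdentity(
        type='UserAssigned',
        user_assigned_identities={identity_id: UserAssignedManagedIdentity()},
    ),
)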
- - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param output_files: The list of output files to produce. Each entry in - the list is a set of audio and video layer labels to be muxed together . - :type output_files: list[~azure.mgmt.media.models.OutputFile] - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - } - - def __init__(self, **kwargs): - super(Mp4Format, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.Mp4Format' - - class NoEncryption(Model): """Class for NoEncryption scheme. @@ -5428,35 +3095,6 @@ def __init__(self, **kwargs): self.enabled_protocols = kwargs.get('enabled_protocols', None) -class ODataError(Model): - """Information about an error. - - :param code: A language-independent error name. - :type code: str - :param message: The error message. - :type message: str - :param target: The target of the error (for example, the name of the - property in error). - :type target: str - :param details: The error details. - :type details: list[~azure.mgmt.media.models.ODataError] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'target': {'key': 'target', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[ODataError]'}, - } - - def __init__(self, **kwargs): - super(ODataError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - self.target = kwargs.get('target', None) - self.details = kwargs.get('details', None) - - class Operation(Model): """An operation. @@ -5500,6 +3138,22 @@ def __init__(self, **kwargs): self.action_type = kwargs.get('action_type', None) +class OperationCollection(Model): + """A collection of Operation items. + + :param value: A collection of Operation items. + :type value: list[~azure.mgmt.media.models.Operation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[Operation]'}, + } + + def __init__(self, **kwargs): + super(OperationCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + + class OperationDisplay(Model): """Operation details. 
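# Illustrative sketch for the multi-bitrate output format documented in the removal
# hunk above (part of the 2021-05-01 encoding surface): the filename pattern uses the
# documented macros, and each OutputFile lists the layer labels to mux together.
# Labels 'v1' and 'a1' are hypothetical.
from azure.mgmt.media.models import Mp4Format, OutputFile

mp4_format = Mp4Format(
    filename_pattern='{Basename}_{Label}_{Bitrate}{Extension}',
    output_files=[OutputFile(labels=['v1', 'a1'])],  # mux video layer v1 with audio layer a1
)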
@@ -5514,206 +3168,18 @@ class OperationDisplay(Model): """ _attribute_map = { - 'provider': {'key': 'provider', 'type': 'str'}, - 'resource': {'key': 'resource', 'type': 'str'}, - 'operation': {'key': 'operation', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(OperationDisplay, self).__init__(**kwargs) - self.provider = kwargs.get('provider', None) - self.resource = kwargs.get('resource', None) - self.operation = kwargs.get('operation', None) - self.description = kwargs.get('description', None) - - -class OutputFile(Model): - """Represents an output file produced. - - All required parameters must be populated in order to send to Azure. - - :param labels: Required. The list of labels that describe how the encoder - should multiplex video and audio into an output file. For example, if the - encoder is producing two video layers with labels v1 and v2, and one audio - layer with label a1, then an array like '[v1, a1]' tells the encoder to - produce an output file with the video track represented by v1 and the - audio track represented by a1. - :type labels: list[str] - """ - - _validation = { - 'labels': {'required': True}, - } - - _attribute_map = { - 'labels': {'key': 'labels', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(OutputFile, self).__init__(**kwargs) - self.labels = kwargs.get('labels', None) - - -class PngFormat(ImageFormat): - """Describes the settings for producing PNG thumbnails. - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(PngFormat, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.PngFormat' - - -class PngImage(Image): - """Describes the properties for producing a series of PNG images from the - input video. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. 
The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param start: Required. The position in the input video from where to - start generating thumbnails. The value can be in ISO 8601 format (For - example, PT05S to start at 5 seconds), or a frame count (For example, 10 - to start at the 10th frame), or a relative value to stream duration (For - example, 10% to start at 10% of stream duration). Also supports a macro - {Best}, which tells the encoder to select the best thumbnail from the - first few seconds of the video and will only produce one thumbnail, no - matter what other settings are for Step and Range. The default value is - macro {Best}. - :type start: str - :param step: The intervals at which thumbnails are generated. The value - can be in ISO 8601 format (For example, PT05S for one image every 5 - seconds), or a frame count (For example, 30 for one image every 30 - frames), or a relative value to stream duration (For example, 10% for one - image every 10% of stream duration). Note: Step value will affect the - first generated thumbnail, which may not be exactly the one specified at - transform preset start time. This is due to the encoder, which tries to - select the best thumbnail between start time and Step position from start - time as the first output. As the default value is 10%, it means if stream - has long duration, the first generated thumbnail might be far away from - the one specified at start time. Try to select reasonable value for Step - if the first thumbnail is expected close to start time, or set Range value - at 1 if only one thumbnail is needed at start time. - :type step: str - :param range: The position relative to transform preset start time in the - input video at which to stop generating thumbnails. The value can be in - ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds - from start time), or a frame count (For example, 300 to stop at the 300th - frame from the frame at start time. If this value is 1, it means only - producing one thumbnail at start time), or a relative value to the stream - duration (For example, 50% to stop at half of stream duration from start - time). The default value is 100%, which means to stop at the end of the - stream. - :type range: str - :param layers: A collection of output PNG image layers to be produced by - the encoder. 
- :type layers: list[~azure.mgmt.media.models.PngLayer] - """ - - _validation = { - 'odatatype': {'required': True}, - 'start': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'str'}, - 'step': {'key': 'step', 'type': 'str'}, - 'range': {'key': 'range', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[PngLayer]'}, - } - - def __init__(self, **kwargs): - super(PngImage, self).__init__(**kwargs) - self.layers = kwargs.get('layers', None) - self.odatatype = '#Microsoft.Media.PngImage' - - -class PngLayer(Layer): - """Describes the settings to produce a PNG image from the input video. - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'resource': {'key': 'resource', 'type': 'str'}, + 'operation': {'key': 'operation', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, } def __init__(self, **kwargs): - super(PngLayer, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.PngLayer' + super(OperationDisplay, self).__init__(**kwargs) + self.provider = kwargs.get('provider', None) + self.resource = kwargs.get('resource', None) + self.operation = kwargs.get('operation', None) + self.description = kwargs.get('description', None) class PresentationTimeRange(Model): @@ -5963,245 +3429,33 @@ def __init__(self, **kwargs): self.service_specification = None -class Provider(Model): - """A resource provider. - - All required parameters must be populated in order to send to Azure. - - :param provider_name: Required. The provider name. - :type provider_name: str - """ - - _validation = { - 'provider_name': {'required': True}, - } - - _attribute_map = { - 'provider_name': {'key': 'providerName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(Provider, self).__init__(**kwargs) - self.provider_name = kwargs.get('provider_name', None) - - -class Rectangle(Model): - """Describes the properties of a rectangular window applied to the input media - before processing it. - - :param left: The number of pixels from the left-margin. This can be - absolute pixel value (e.g 100), or relative to the size of the video (For - example, 50%). 
- :type left: str - :param top: The number of pixels from the top-margin. This can be absolute - pixel value (e.g 100), or relative to the size of the video (For example, - 50%). - :type top: str - :param width: The width of the rectangular region in pixels. This can be - absolute pixel value (e.g 100), or relative to the size of the video (For - example, 50%). - :type width: str - :param height: The height of the rectangular region in pixels. This can be - absolute pixel value (e.g 100), or relative to the size of the video (For - example, 50%). - :type height: str - """ - - _attribute_map = { - 'left': {'key': 'left', 'type': 'str'}, - 'top': {'key': 'top', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(Rectangle, self).__init__(**kwargs) - self.left = kwargs.get('left', None) - self.top = kwargs.get('top', None) - self.width = kwargs.get('width', None) - self.height = kwargs.get('height', None) - - -class SelectAudioTrackByAttribute(AudioTrackDescriptor): - """Select audio tracks from the input by specifying an attribute and an - attribute filter. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channel_mapping: Optional designation for single channel audio - tracks. Can be used to combine the tracks into stereo or multi-channel - audio tracks. Possible values include: 'FrontLeft', 'FrontRight', - 'Center', 'LowFrequencyEffects', 'BackLeft', 'BackRight', 'StereoLeft', - 'StereoRight' - :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping - :param attribute: Required. The TrackAttribute to filter the tracks by. - Possible values include: 'Bitrate', 'Language' - :type attribute: str or ~azure.mgmt.media.models.TrackAttribute - :param filter: Required. The type of AttributeFilter to apply to the - TrackAttribute in order to select the tracks. Possible values include: - 'All', 'Top', 'Bottom', 'ValueEquals' - :type filter: str or ~azure.mgmt.media.models.AttributeFilter - :param filter_value: The value to filter the tracks by. Only used when - AttributeFilter.ValueEquals is specified for the Filter property. - :type filter_value: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'attribute': {'required': True}, - 'filter': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, - 'attribute': {'key': 'attribute', 'type': 'str'}, - 'filter': {'key': 'filter', 'type': 'str'}, - 'filter_value': {'key': 'filterValue', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(SelectAudioTrackByAttribute, self).__init__(**kwargs) - self.attribute = kwargs.get('attribute', None) - self.filter = kwargs.get('filter', None) - self.filter_value = kwargs.get('filter_value', None) - self.odatatype = '#Microsoft.Media.SelectAudioTrackByAttribute' - - -class SelectAudioTrackById(AudioTrackDescriptor): - """Select audio tracks from the input by specifying a track identifier. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channel_mapping: Optional designation for single channel audio - tracks. Can be used to combine the tracks into stereo or multi-channel - audio tracks. 
Possible values include: 'FrontLeft', 'FrontRight', - 'Center', 'LowFrequencyEffects', 'BackLeft', 'BackRight', 'StereoLeft', - 'StereoRight' - :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping - :param track_id: Required. Track identifier to select - :type track_id: long - """ - - _validation = { - 'odatatype': {'required': True}, - 'track_id': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, - 'track_id': {'key': 'trackId', 'type': 'long'}, - } - - def __init__(self, **kwargs): - super(SelectAudioTrackById, self).__init__(**kwargs) - self.track_id = kwargs.get('track_id', None) - self.odatatype = '#Microsoft.Media.SelectAudioTrackById' - - -class VideoTrackDescriptor(TrackDescriptor): - """A TrackSelection to select video tracks. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SelectVideoTrackByAttribute, SelectVideoTrackById - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.SelectVideoTrackByAttribute': 'SelectVideoTrackByAttribute', '#Microsoft.Media.SelectVideoTrackById': 'SelectVideoTrackById'} - } - - def __init__(self, **kwargs): - super(VideoTrackDescriptor, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.VideoTrackDescriptor' - - -class SelectVideoTrackByAttribute(VideoTrackDescriptor): - """Select video tracks from the input by specifying an attribute and an - attribute filter. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param attribute: Required. The TrackAttribute to filter the tracks by. - Possible values include: 'Bitrate', 'Language' - :type attribute: str or ~azure.mgmt.media.models.TrackAttribute - :param filter: Required. The type of AttributeFilter to apply to the - TrackAttribute in order to select the tracks. Possible values include: - 'All', 'Top', 'Bottom', 'ValueEquals' - :type filter: str or ~azure.mgmt.media.models.AttributeFilter - :param filter_value: The value to filter the tracks by. Only used when - AttributeFilter.ValueEquals is specified for the Filter property. For - TrackAttribute.Bitrate, this should be an integer value in bits per second - (e.g: '1500000'). The TrackAttribute.Language is not supported for video - tracks. - :type filter_value: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'attribute': {'required': True}, - 'filter': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'attribute': {'key': 'attribute', 'type': 'str'}, - 'filter': {'key': 'filter', 'type': 'str'}, - 'filter_value': {'key': 'filterValue', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(SelectVideoTrackByAttribute, self).__init__(**kwargs) - self.attribute = kwargs.get('attribute', None) - self.filter = kwargs.get('filter', None) - self.filter_value = kwargs.get('filter_value', None) - self.odatatype = '#Microsoft.Media.SelectVideoTrackByAttribute' - - -class SelectVideoTrackById(VideoTrackDescriptor): - """Select video tracks from the input by specifying a track identifier. 
+class ResourceIdentity(Model): + """ResourceIdentity. All required parameters must be populated in order to send to Azure. - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param track_id: Required. Track identifier to select - :type track_id: long + :param user_assigned_identity: The user assigned managed identity's ARM ID + to use when accessing a resource. + :type user_assigned_identity: str + :param use_system_assigned_identity: Required. Indicates whether to use + System Assigned Managed Identity. Mutual exclusive with User Assigned + Managed Identity. + :type use_system_assigned_identity: bool """ _validation = { - 'odatatype': {'required': True}, - 'track_id': {'required': True}, + 'use_system_assigned_identity': {'required': True}, } _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'track_id': {'key': 'trackId', 'type': 'long'}, + 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, + 'use_system_assigned_identity': {'key': 'useSystemAssignedIdentity', 'type': 'bool'}, } def __init__(self, **kwargs): - super(SelectVideoTrackById, self).__init__(**kwargs) - self.track_id = kwargs.get('track_id', None) - self.odatatype = '#Microsoft.Media.SelectVideoTrackById' + super(ResourceIdentity, self).__init__(**kwargs) + self.user_assigned_identity = kwargs.get('user_assigned_identity', None) + self.use_system_assigned_identity = kwargs.get('use_system_assigned_identity', None) class ServiceSpecification(Model): @@ -6234,49 +3488,12 @@ def __init__(self, **kwargs): self.metric_specifications = None -class StandardEncoderPreset(Preset): - """Describes all the settings to be used when encoding the input video with - the Standard Encoder. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param filters: One or more filtering operations that are applied to the - input media before encoding. - :type filters: ~azure.mgmt.media.models.Filters - :param codecs: Required. The list of codecs to be used when encoding the - input video. - :type codecs: list[~azure.mgmt.media.models.Codec] - :param formats: Required. The list of outputs to be produced by the - encoder. - :type formats: list[~azure.mgmt.media.models.Format] - """ - - _validation = { - 'odatatype': {'required': True}, - 'codecs': {'required': True}, - 'formats': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'filters': {'key': 'filters', 'type': 'Filters'}, - 'codecs': {'key': 'codecs', 'type': '[Codec]'}, - 'formats': {'key': 'formats', 'type': '[Format]'}, - } - - def __init__(self, **kwargs): - super(StandardEncoderPreset, self).__init__(**kwargs) - self.filters = kwargs.get('filters', None) - self.codecs = kwargs.get('codecs', None) - self.formats = kwargs.get('formats', None) - self.odatatype = '#Microsoft.Media.StandardEncoderPreset' - - class StorageAccount(Model): """The storage account details. + Variables are only populated by the server, and will be ignored when + sending a request. + All required parameters must be populated in order to send to Azure. :param id: The ID of the storage account resource. Media Services relies @@ -6288,21 +3505,30 @@ class StorageAccount(Model): :param type: Required. The type of the storage account. Possible values include: 'Primary', 'Secondary' :type type: str or ~azure.mgmt.media.models.StorageAccountType + :param identity: The storage account identity. 
+ :type identity: ~azure.mgmt.media.models.ResourceIdentity + :ivar status: The current status of the storage account mapping. + :vartype status: str """ _validation = { 'type': {'required': True}, + 'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, + 'status': {'key': 'status', 'type': 'str'}, } def __init__(self, **kwargs): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.type = kwargs.get('type', None) + self.identity = kwargs.get('identity', None) + self.status = None class StorageEncryptedAssetDecryptionData(Model): @@ -6972,314 +4198,29 @@ def __init__(self, **kwargs): self.track_selections = kwargs.get('track_selections', None) -class Transform(ProxyResource): - """A Transform encapsulates the rules or instructions for generating desired - outputs from input media, such as by transcoding or by extracting insights. - After the Transform is created, it can be applied to input media by - creating Jobs. +class UserAssignedManagedIdentity(Model): + """UserAssignedManagedIdentity. Variables are only populated by the server, and will be ignored when sending a request. - All required parameters must be populated in order to send to Azure. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - :vartype id: str - :ivar name: The name of the resource - :vartype name: str - :ivar type: The type of the resource. E.g. - "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - :vartype type: str - :ivar created: The UTC date and time when the Transform was created, in - 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype created: datetime - :param description: An optional verbose description of the Transform. - :type description: str - :ivar last_modified: The UTC date and time when the Transform was last - updated, in 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype last_modified: datetime - :param outputs: Required. An array of one or more TransformOutputs that - the Transform should generate. - :type outputs: list[~azure.mgmt.media.models.TransformOutput] - :ivar system_data: The system metadata relating to this resource. 
- :vartype system_data: ~azure.mgmt.media.models.SystemData - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'created': {'readonly': True}, - 'last_modified': {'readonly': True}, - 'outputs': {'required': True}, - 'system_data': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'created': {'key': 'properties.created', 'type': 'iso-8601'}, - 'description': {'key': 'properties.description', 'type': 'str'}, - 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, - 'outputs': {'key': 'properties.outputs', 'type': '[TransformOutput]'}, - 'system_data': {'key': 'systemData', 'type': 'SystemData'}, - } - - def __init__(self, **kwargs): - super(Transform, self).__init__(**kwargs) - self.created = None - self.description = kwargs.get('description', None) - self.last_modified = None - self.outputs = kwargs.get('outputs', None) - self.system_data = None - - -class TransformOutput(Model): - """Describes the properties of a TransformOutput, which are the rules to be - applied while generating the desired output. - - All required parameters must be populated in order to send to Azure. - - :param on_error: A Transform can define more than one outputs. This - property defines what the service should do when one output fails - either - continue to produce other outputs, or, stop the other outputs. The overall - Job state will not reflect failures of outputs that are specified with - 'ContinueJob'. The default is 'StopProcessingJob'. Possible values - include: 'StopProcessingJob', 'ContinueJob' - :type on_error: str or ~azure.mgmt.media.models.OnErrorType - :param relative_priority: Sets the relative priority of the - TransformOutputs within a Transform. This sets the priority that the - service uses for processing TransformOutputs. The default priority is - Normal. Possible values include: 'Low', 'Normal', 'High' - :type relative_priority: str or ~azure.mgmt.media.models.Priority - :param preset: Required. Preset that describes the operations that will be - used to modify, transcode, or extract insights from the source file to - generate the output. - :type preset: ~azure.mgmt.media.models.Preset - """ - - _validation = { - 'preset': {'required': True}, - } - - _attribute_map = { - 'on_error': {'key': 'onError', 'type': 'str'}, - 'relative_priority': {'key': 'relativePriority', 'type': 'str'}, - 'preset': {'key': 'preset', 'type': 'Preset'}, - } - - def __init__(self, **kwargs): - super(TransformOutput, self).__init__(**kwargs) - self.on_error = kwargs.get('on_error', None) - self.relative_priority = kwargs.get('relative_priority', None) - self.preset = kwargs.get('preset', None) - - -class TransportStreamFormat(MultiBitrateFormat): - """Describes the properties for generating an MPEG-2 Transport Stream (ISO/IEC - 13818-1) output video file(s). - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. 
If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param output_files: The list of output files to produce. Each entry in - the list is a set of audio and video layer labels to be muxed together . - :type output_files: list[~azure.mgmt.media.models.OutputFile] - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - } - - def __init__(self, **kwargs): - super(TransportStreamFormat, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.TransportStreamFormat' - - -class UtcClipTime(ClipTime): - """Specifies the clip time as a Utc time position in the media file. The Utc - time can point to a different position depending on whether the media file - starts from a timestamp of zero or not. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param time: Required. The time position on the timeline of the input - media based on Utc time. - :type time: datetime - """ - - _validation = { - 'odatatype': {'required': True}, - 'time': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'time': {'key': 'time', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs): - super(UtcClipTime, self).__init__(**kwargs) - self.time = kwargs.get('time', None) - self.odatatype = '#Microsoft.Media.UtcClipTime' - - -class VideoAnalyzerPreset(AudioAnalyzerPreset): - """A video analyzer preset that extracts insights (rich metadata) from both - audio and video, and outputs a JSON format file. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param audio_language: The language for the audio payload in the input - using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you - know the language of your content, it is recommended that you specify it. - The language must be specified explicitly for AudioAnalysisMode::Basic, - since automatic language detection is not included in basic mode. If the - language isn't specified or set to null, automatic language detection will - choose the first language detected and process with the selected language - for the duration of the file. It does not currently support dynamically - switching between languages after the first language is detected. The - automatic detection works best with audio recordings with clearly - discernable speech. If automatic detection fails to find the language, - transcription would fallback to 'en-US'." 
The list of supported languages - is available here: https://go.microsoft.com/fwlink/?linkid=2109463 - :type audio_language: str - :param mode: Determines the set of audio analysis operations to be - performed. If unspecified, the Standard AudioAnalysisMode would be chosen. - Possible values include: 'Standard', 'Basic' - :type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode - :param experimental_options: Dictionary containing key value pairs for - parameters not exposed in the preset itself - :type experimental_options: dict[str, str] - :param insights_to_extract: Defines the type of insights that you want the - service to generate. The allowed values are 'AudioInsightsOnly', - 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. If you - set this to AllInsights and the input is audio only, then only audio - insights are generated. Similarly if the input is video only, then only - video insights are generated. It is recommended that you not use - AudioInsightsOnly if you expect some of your inputs to be video only; or - use VideoInsightsOnly if you expect some of your inputs to be audio only. - Your Jobs in such conditions would error out. Possible values include: - 'AudioInsightsOnly', 'VideoInsightsOnly', 'AllInsights' - :type insights_to_extract: str or ~azure.mgmt.media.models.InsightsType - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'audio_language': {'key': 'audioLanguage', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, - 'insights_to_extract': {'key': 'insightsToExtract', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(VideoAnalyzerPreset, self).__init__(**kwargs) - self.insights_to_extract = kwargs.get('insights_to_extract', None) - self.odatatype = '#Microsoft.Media.VideoAnalyzerPreset' - - -class VideoOverlay(Overlay): - """Describes the properties of a video overlay. - - All required parameters must be populated in order to send to Azure. - - :param input_label: Required. The label of the job input which is to be - used as an overlay. The Input must specify exactly one file. You can - specify an image file in JPG, PNG, GIF or BMP format, or an audio file - (such as a WAV, MP3, WMA or M4A file), or a video file. See - https://aka.ms/mesformats for the complete list of supported audio and - video file formats. - :type input_label: str - :param start: The start position, with reference to the input video, at - which the overlay starts. The value should be in ISO 8601 format. For - example, PT05S to start the overlay at 5 seconds into the input video. If - not specified the overlay starts from the beginning of the input video. - :type start: timedelta - :param end: The end position, with reference to the input video, at which - the overlay ends. The value should be in ISO 8601 format. For example, - PT30S to end the overlay at 30 seconds into the input video. If not - specified or the value is greater than the input video duration, the - overlay will be applied until the end of the input video if the overlay - media duration is greater than the input video duration, else the overlay - will last as long as the overlay media duration. - :type end: timedelta - :param fade_in_duration: The duration over which the overlay fades in onto - the input video. The value should be in ISO 8601 duration format. 
If not - specified the default behavior is to have no fade in (same as PT0S). - :type fade_in_duration: timedelta - :param fade_out_duration: The duration over which the overlay fades out of - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade out (same as PT0S). - :type fade_out_duration: timedelta - :param audio_gain_level: The gain level of audio in the overlay. The value - should be in the range [0, 1.0]. The default is 1.0. - :type audio_gain_level: float - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param position: The location in the input video where the overlay is - applied. - :type position: ~azure.mgmt.media.models.Rectangle - :param opacity: The opacity of the overlay. This is a value in the range - [0 - 1.0]. Default is 1.0 which mean the overlay is opaque. - :type opacity: float - :param crop_rectangle: An optional rectangular window used to crop the - overlay image or video. - :type crop_rectangle: ~azure.mgmt.media.models.Rectangle + :ivar client_id: The client ID. + :vartype client_id: str + :ivar principal_id: The principal ID. + :vartype principal_id: str """ _validation = { - 'input_label': {'required': True}, - 'odatatype': {'required': True}, + 'client_id': {'readonly': True}, + 'principal_id': {'readonly': True}, } _attribute_map = { - 'input_label': {'key': 'inputLabel', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'duration'}, - 'end': {'key': 'end', 'type': 'duration'}, - 'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'}, - 'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'}, - 'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'position': {'key': 'position', 'type': 'Rectangle'}, - 'opacity': {'key': 'opacity', 'type': 'float'}, - 'crop_rectangle': {'key': 'cropRectangle', 'type': 'Rectangle'}, + 'client_id': {'key': 'clientId', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, } def __init__(self, **kwargs): - super(VideoOverlay, self).__init__(**kwargs) - self.position = kwargs.get('position', None) - self.opacity = kwargs.get('opacity', None) - self.crop_rectangle = kwargs.get('crop_rectangle', None) - self.odatatype = '#Microsoft.Media.VideoOverlay' + super(UserAssignedManagedIdentity, self).__init__(**kwargs) + self.client_id = None + self.principal_id = None diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py index 20d9e7ff377c..754b3aafe053 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py @@ -13,188 +13,6 @@ from msrest.exceptions import HttpOperationError -class Codec(Model): - """Describes the basic properties of all codecs. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Audio, Video, CopyVideo, CopyAudio - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. 
- :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.Audio': 'Audio', '#Microsoft.Media.Video': 'Video', '#Microsoft.Media.CopyVideo': 'CopyVideo', '#Microsoft.Media.CopyAudio': 'CopyAudio'} - } - - def __init__(self, *, label: str=None, **kwargs) -> None: - super(Codec, self).__init__(**kwargs) - self.label = label - self.odatatype = None - - -class Audio(Codec): - """Defines the common properties for all audio codecs. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AacAudio - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channels: The number of channels in the audio. - :type channels: int - :param sampling_rate: The sampling rate to use for encoding in hertz. - :type sampling_rate: int - :param bitrate: The bitrate, in bits per second, of the output encoded - audio. - :type bitrate: int - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channels': {'key': 'channels', 'type': 'int'}, - 'sampling_rate': {'key': 'samplingRate', 'type': 'int'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AacAudio': 'AacAudio'} - } - - def __init__(self, *, label: str=None, channels: int=None, sampling_rate: int=None, bitrate: int=None, **kwargs) -> None: - super(Audio, self).__init__(label=label, **kwargs) - self.channels = channels - self.sampling_rate = sampling_rate - self.bitrate = bitrate - self.odatatype = '#Microsoft.Media.Audio' - - -class AacAudio(Audio): - """Describes Advanced Audio Codec (AAC) audio encoding settings. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channels: The number of channels in the audio. - :type channels: int - :param sampling_rate: The sampling rate to use for encoding in hertz. - :type sampling_rate: int - :param bitrate: The bitrate, in bits per second, of the output encoded - audio. - :type bitrate: int - :param profile: The encoding profile to be used when encoding audio with - AAC. 
Possible values include: 'AacLc', 'HeAacV1', 'HeAacV2' - :type profile: str or ~azure.mgmt.media.models.AacAudioProfile - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channels': {'key': 'channels', 'type': 'int'}, - 'sampling_rate': {'key': 'samplingRate', 'type': 'int'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'profile': {'key': 'profile', 'type': 'str'}, - } - - def __init__(self, *, label: str=None, channels: int=None, sampling_rate: int=None, bitrate: int=None, profile=None, **kwargs) -> None: - super(AacAudio, self).__init__(label=label, channels=channels, sampling_rate=sampling_rate, bitrate=bitrate, **kwargs) - self.profile = profile - self.odatatype = '#Microsoft.Media.AacAudio' - - -class ClipTime(Model): - """Base class for specifying a clip time. Use sub classes of this class to - specify the time position in the media. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AbsoluteClipTime, UtcClipTime - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AbsoluteClipTime': 'AbsoluteClipTime', '#Microsoft.Media.UtcClipTime': 'UtcClipTime'} - } - - def __init__(self, **kwargs) -> None: - super(ClipTime, self).__init__(**kwargs) - self.odatatype = None - - -class AbsoluteClipTime(ClipTime): - """Specifies the clip time as an absolute time position in the media file. - The absolute time can point to a different position depending on whether - the media file starts from a timestamp of zero or not. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param time: Required. The time position on the timeline of the input - media. It is usually specified as an ISO8601 period. e.g PT30S for 30 - seconds. - :type time: timedelta - """ - - _validation = { - 'odatatype': {'required': True}, - 'time': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'time': {'key': 'time', 'type': 'duration'}, - } - - def __init__(self, *, time, **kwargs) -> None: - super(AbsoluteClipTime, self).__init__(**kwargs) - self.time = time - self.odatatype = '#Microsoft.Media.AbsoluteClipTime' - - class AccessControl(Model): """AccessControl. @@ -221,6 +39,9 @@ def __init__(self, *, default_action=None, ip_allow_list=None, **kwargs) -> None class AccountEncryption(Model): """AccountEncryption. + Variables are only populated by the server, and will be ignored when + sending a request. + All required parameters must be populated in order to send to Azure. :param type: Required. The type of key used to encrypt the Account Key. @@ -229,21 +50,30 @@ class AccountEncryption(Model): :param key_vault_properties: The properties of the key used to encrypt the account. :type key_vault_properties: ~azure.mgmt.media.models.KeyVaultProperties + :param identity: The Key Vault identity. + :type identity: ~azure.mgmt.media.models.ResourceIdentity + :ivar status: The current status of the Key Vault mapping. 
+ :vartype status: str """ _validation = { 'type': {'required': True}, + 'status': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, + 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, + 'status': {'key': 'status', 'type': 'str'}, } - def __init__(self, *, type, key_vault_properties=None, **kwargs) -> None: + def __init__(self, *, type, key_vault_properties=None, identity=None, **kwargs) -> None: super(AccountEncryption, self).__init__(**kwargs) self.type = type self.key_vault_properties = key_vault_properties + self.identity = identity + self.status = None class Resource(Model): @@ -411,34 +241,6 @@ def __init__(self, *, identifier: str=None, base64_key: str=None, expiration=Non self.expiration = expiration -class ApiError(Model): - """The API error. - - :param error: The error properties. - :type error: ~azure.mgmt.media.models.ODataError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'ODataError'}, - } - - def __init__(self, *, error=None, **kwargs) -> None: - super(ApiError, self).__init__(**kwargs) - self.error = error - - -class ApiErrorException(HttpOperationError): - """Server responsed with exception of type: 'ApiError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, deserialize, response, *args): - - super(ApiErrorException, self).__init__(deserialize, response, 'ApiError', *args) - - class Asset(ProxyResource): """An Asset. @@ -671,435 +473,110 @@ def __init__(self, **kwargs) -> None: self.default_content_key_policy_name = None -class Preset(Model): - """Base type for all Presets, which define the recipe or instructions on how - the input media files should be processed. +class AzureEntityResource(Resource): + """Entity Resource. - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FaceDetectorPreset, AudioAnalyzerPreset, - BuiltInStandardEncoderPreset, StandardEncoderPreset + The resource model definition for an Azure Resource Manager resource with + an etag. - All required parameters must be populated in order to send to Azure. + Variables are only populated by the server, and will be ignored when + sending a request. - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + :vartype id: str + :ivar name: The name of the resource + :vartype name: str + :ivar type: The type of the resource. E.g. + "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + :vartype type: str + :ivar etag: Resource Etag. 
+ :vartype etag: str """ _validation = { - 'odatatype': {'required': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, } _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.FaceDetectorPreset': 'FaceDetectorPreset', '#Microsoft.Media.AudioAnalyzerPreset': 'AudioAnalyzerPreset', '#Microsoft.Media.BuiltInStandardEncoderPreset': 'BuiltInStandardEncoderPreset', '#Microsoft.Media.StandardEncoderPreset': 'StandardEncoderPreset'} + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, } def __init__(self, **kwargs) -> None: - super(Preset, self).__init__(**kwargs) - self.odatatype = None - - -class AudioAnalyzerPreset(Preset): - """The Audio Analyzer preset applies a pre-defined set of AI-based analysis - operations, including speech transcription. Currently, the preset supports - processing of content with a single audio track. + super(AzureEntityResource, self).__init__(**kwargs) + self.etag = None - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: VideoAnalyzerPreset - All required parameters must be populated in order to send to Azure. +class CbcsDrmConfiguration(Model): + """Class to specify DRM configurations of CommonEncryptionCbcs scheme in + Streaming Policy. - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param audio_language: The language for the audio payload in the input - using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you - know the language of your content, it is recommended that you specify it. - The language must be specified explicitly for AudioAnalysisMode::Basic, - since automatic language detection is not included in basic mode. If the - language isn't specified or set to null, automatic language detection will - choose the first language detected and process with the selected language - for the duration of the file. It does not currently support dynamically - switching between languages after the first language is detected. The - automatic detection works best with audio recordings with clearly - discernable speech. If automatic detection fails to find the language, - transcription would fallback to 'en-US'." The list of supported languages - is available here: https://go.microsoft.com/fwlink/?linkid=2109463 - :type audio_language: str - :param mode: Determines the set of audio analysis operations to be - performed. If unspecified, the Standard AudioAnalysisMode would be chosen. 
- Possible values include: 'Standard', 'Basic' - :type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode - :param experimental_options: Dictionary containing key value pairs for - parameters not exposed in the preset itself - :type experimental_options: dict[str, str] + :param fair_play: FairPlay configurations + :type fair_play: + ~azure.mgmt.media.models.StreamingPolicyFairPlayConfiguration + :param play_ready: PlayReady configurations + :type play_ready: + ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration + :param widevine: Widevine configurations + :type widevine: + ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration """ - _validation = { - 'odatatype': {'required': True}, - } - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'audio_language': {'key': 'audioLanguage', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.VideoAnalyzerPreset': 'VideoAnalyzerPreset'} + 'fair_play': {'key': 'fairPlay', 'type': 'StreamingPolicyFairPlayConfiguration'}, + 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, + 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, } - def __init__(self, *, audio_language: str=None, mode=None, experimental_options=None, **kwargs) -> None: - super(AudioAnalyzerPreset, self).__init__(**kwargs) - self.audio_language = audio_language - self.mode = mode - self.experimental_options = experimental_options - self.odatatype = '#Microsoft.Media.AudioAnalyzerPreset' - - -class Overlay(Model): - """Base type for all overlays - image, audio or video. + def __init__(self, *, fair_play=None, play_ready=None, widevine=None, **kwargs) -> None: + super(CbcsDrmConfiguration, self).__init__(**kwargs) + self.fair_play = fair_play + self.play_ready = play_ready + self.widevine = widevine - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AudioOverlay, VideoOverlay - All required parameters must be populated in order to send to Azure. +class CencDrmConfiguration(Model): + """Class to specify DRM configurations of CommonEncryptionCenc scheme in + Streaming Policy. - :param input_label: Required. The label of the job input which is to be - used as an overlay. The Input must specify exactly one file. You can - specify an image file in JPG, PNG, GIF or BMP format, or an audio file - (such as a WAV, MP3, WMA or M4A file), or a video file. See - https://aka.ms/mesformats for the complete list of supported audio and - video file formats. - :type input_label: str - :param start: The start position, with reference to the input video, at - which the overlay starts. The value should be in ISO 8601 format. For - example, PT05S to start the overlay at 5 seconds into the input video. If - not specified the overlay starts from the beginning of the input video. - :type start: timedelta - :param end: The end position, with reference to the input video, at which - the overlay ends. The value should be in ISO 8601 format. For example, - PT30S to end the overlay at 30 seconds into the input video. If not - specified or the value is greater than the input video duration, the - overlay will be applied until the end of the input video if the overlay - media duration is greater than the input video duration, else the overlay - will last as long as the overlay media duration. 
- :type end: timedelta - :param fade_in_duration: The duration over which the overlay fades in onto - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade in (same as PT0S). - :type fade_in_duration: timedelta - :param fade_out_duration: The duration over which the overlay fades out of - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade out (same as PT0S). - :type fade_out_duration: timedelta - :param audio_gain_level: The gain level of audio in the overlay. The value - should be in the range [0, 1.0]. The default is 1.0. - :type audio_gain_level: float - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :param play_ready: PlayReady configurations + :type play_ready: + ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration + :param widevine: Widevine configurations + :type widevine: + ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration """ - _validation = { - 'input_label': {'required': True}, - 'odatatype': {'required': True}, - } - _attribute_map = { - 'input_label': {'key': 'inputLabel', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'duration'}, - 'end': {'key': 'end', 'type': 'duration'}, - 'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'}, - 'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'}, - 'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AudioOverlay': 'AudioOverlay', '#Microsoft.Media.VideoOverlay': 'VideoOverlay'} + 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, + 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, } - def __init__(self, *, input_label: str, start=None, end=None, fade_in_duration=None, fade_out_duration=None, audio_gain_level: float=None, **kwargs) -> None: - super(Overlay, self).__init__(**kwargs) - self.input_label = input_label - self.start = start - self.end = end - self.fade_in_duration = fade_in_duration - self.fade_out_duration = fade_out_duration - self.audio_gain_level = audio_gain_level - self.odatatype = None - + def __init__(self, *, play_ready=None, widevine=None, **kwargs) -> None: + super(CencDrmConfiguration, self).__init__(**kwargs) + self.play_ready = play_ready + self.widevine = widevine -class AudioOverlay(Overlay): - """Describes the properties of an audio overlay. - All required parameters must be populated in order to send to Azure. +class CheckNameAvailabilityInput(Model): + """The input to the check name availability request. - :param input_label: Required. The label of the job input which is to be - used as an overlay. The Input must specify exactly one file. You can - specify an image file in JPG, PNG, GIF or BMP format, or an audio file - (such as a WAV, MP3, WMA or M4A file), or a video file. See - https://aka.ms/mesformats for the complete list of supported audio and - video file formats. - :type input_label: str - :param start: The start position, with reference to the input video, at - which the overlay starts. The value should be in ISO 8601 format. For - example, PT05S to start the overlay at 5 seconds into the input video. If - not specified the overlay starts from the beginning of the input video. 
- :type start: timedelta - :param end: The end position, with reference to the input video, at which - the overlay ends. The value should be in ISO 8601 format. For example, - PT30S to end the overlay at 30 seconds into the input video. If not - specified or the value is greater than the input video duration, the - overlay will be applied until the end of the input video if the overlay - media duration is greater than the input video duration, else the overlay - will last as long as the overlay media duration. - :type end: timedelta - :param fade_in_duration: The duration over which the overlay fades in onto - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade in (same as PT0S). - :type fade_in_duration: timedelta - :param fade_out_duration: The duration over which the overlay fades out of - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade out (same as PT0S). - :type fade_out_duration: timedelta - :param audio_gain_level: The gain level of audio in the overlay. The value - should be in the range [0, 1.0]. The default is 1.0. - :type audio_gain_level: float - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :param name: The account name. + :type name: str + :param type: The account type. For a Media Services account, this should + be 'MediaServices'. + :type type: str """ - _validation = { - 'input_label': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'input_label': {'key': 'inputLabel', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'duration'}, - 'end': {'key': 'end', 'type': 'duration'}, - 'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'}, - 'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'}, - 'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, *, input_label: str, start=None, end=None, fade_in_duration=None, fade_out_duration=None, audio_gain_level: float=None, **kwargs) -> None: - super(AudioOverlay, self).__init__(input_label=input_label, start=start, end=end, fade_in_duration=fade_in_duration, fade_out_duration=fade_out_duration, audio_gain_level=audio_gain_level, **kwargs) - self.odatatype = '#Microsoft.Media.AudioOverlay' - - -class TrackDescriptor(Model): - """Base type for all TrackDescriptor types, which define the metadata and - selection for tracks that should be processed by a Job. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: AudioTrackDescriptor, VideoTrackDescriptor - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.AudioTrackDescriptor': 'AudioTrackDescriptor', '#Microsoft.Media.VideoTrackDescriptor': 'VideoTrackDescriptor'} - } - - def __init__(self, **kwargs) -> None: - super(TrackDescriptor, self).__init__(**kwargs) - self.odatatype = None - - -class AudioTrackDescriptor(TrackDescriptor): - """A TrackSelection to select audio tracks. - - You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: SelectAudioTrackByAttribute, SelectAudioTrackById - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channel_mapping: Optional designation for single channel audio - tracks. Can be used to combine the tracks into stereo or multi-channel - audio tracks. Possible values include: 'FrontLeft', 'FrontRight', - 'Center', 'LowFrequencyEffects', 'BackLeft', 'BackRight', 'StereoLeft', - 'StereoRight' - :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.SelectAudioTrackByAttribute': 'SelectAudioTrackByAttribute', '#Microsoft.Media.SelectAudioTrackById': 'SelectAudioTrackById'} - } - - def __init__(self, *, channel_mapping=None, **kwargs) -> None: - super(AudioTrackDescriptor, self).__init__(**kwargs) - self.channel_mapping = channel_mapping - self.odatatype = '#Microsoft.Media.AudioTrackDescriptor' - - -class AzureEntityResource(Resource): - """Entity Resource. - - The resource model definition for an Azure Resource Manager resource with - an etag. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - :vartype id: str - :ivar name: The name of the resource - :vartype name: str - :ivar type: The type of the resource. E.g. - "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - :vartype type: str - :ivar etag: Resource Etag. - :vartype etag: str - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - } - - def __init__(self, **kwargs) -> None: - super(AzureEntityResource, self).__init__(**kwargs) - self.etag = None - - -class BuiltInStandardEncoderPreset(Preset): - """Describes a built-in preset for encoding the input video with the Standard - Encoder. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param preset_name: Required. The built-in preset to be used for encoding - videos. 
Possible values include: 'H264SingleBitrateSD', - 'H264SingleBitrate720p', 'H264SingleBitrate1080p', 'AdaptiveStreaming', - 'AACGoodQualityAudio', 'ContentAwareEncodingExperimental', - 'ContentAwareEncoding', 'CopyAllBitrateNonInterleaved', - 'H264MultipleBitrate1080p', 'H264MultipleBitrate720p', - 'H264MultipleBitrateSD', 'H265ContentAwareEncoding', - 'H265AdaptiveStreaming', 'H265SingleBitrate720p', - 'H265SingleBitrate1080p', 'H265SingleBitrate4K' - :type preset_name: str or ~azure.mgmt.media.models.EncoderNamedPreset - """ - - _validation = { - 'odatatype': {'required': True}, - 'preset_name': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'preset_name': {'key': 'presetName', 'type': 'str'}, - } - - def __init__(self, *, preset_name, **kwargs) -> None: - super(BuiltInStandardEncoderPreset, self).__init__(**kwargs) - self.preset_name = preset_name - self.odatatype = '#Microsoft.Media.BuiltInStandardEncoderPreset' - - -class CbcsDrmConfiguration(Model): - """Class to specify DRM configurations of CommonEncryptionCbcs scheme in - Streaming Policy. - - :param fair_play: FairPlay configurations - :type fair_play: - ~azure.mgmt.media.models.StreamingPolicyFairPlayConfiguration - :param play_ready: PlayReady configurations - :type play_ready: - ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration - :param widevine: Widevine configurations - :type widevine: - ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration - """ - - _attribute_map = { - 'fair_play': {'key': 'fairPlay', 'type': 'StreamingPolicyFairPlayConfiguration'}, - 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, - 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, - } - - def __init__(self, *, fair_play=None, play_ready=None, widevine=None, **kwargs) -> None: - super(CbcsDrmConfiguration, self).__init__(**kwargs) - self.fair_play = fair_play - self.play_ready = play_ready - self.widevine = widevine - - -class CencDrmConfiguration(Model): - """Class to specify DRM configurations of CommonEncryptionCenc scheme in - Streaming Policy. - - :param play_ready: PlayReady configurations - :type play_ready: - ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration - :param widevine: Widevine configurations - :type widevine: - ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration - """ - - _attribute_map = { - 'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'}, - 'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'}, - } - - def __init__(self, *, play_ready=None, widevine=None, **kwargs) -> None: - super(CencDrmConfiguration, self).__init__(**kwargs) - self.play_ready = play_ready - self.widevine = widevine - - -class CheckNameAvailabilityInput(Model): - """The input to the check name availability request. - - :param name: The account name. - :type name: str - :param type: The account type. For a Media Services account, this should - be 'MediaServices'. 
- :type type: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, } def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None: @@ -2096,59 +1573,6 @@ def __init__(self, *, raw_body: bytearray, **kwargs) -> None: self.odatatype = '#Microsoft.Media.ContentKeyPolicyX509CertificateTokenKey' -class CopyAudio(Codec): - """A codec flag, which tells the encoder to copy the input audio bitstream. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, *, label: str=None, **kwargs) -> None: - super(CopyAudio, self).__init__(label=label, **kwargs) - self.odatatype = '#Microsoft.Media.CopyAudio' - - -class CopyVideo(Codec): - """A codec flag, which tells the encoder to copy the input video bitstream - without re-encoding. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, *, label: str=None, **kwargs) -> None: - super(CopyVideo, self).__init__(label=label, **kwargs) - self.odatatype = '#Microsoft.Media.CopyVideo' - - class CrossSiteAccessPolicies(Model): """The client access policy. @@ -2193,28 +1617,6 @@ def __init__(self, *, label: str=None, policy_name: str=None, **kwargs) -> None: self.policy_name = policy_name -class Deinterlace(Model): - """Describes the de-interlacing settings. - - :param parity: The field parity for de-interlacing, defaults to Auto. - Possible values include: 'Auto', 'TopFieldFirst', 'BottomFieldFirst' - :type parity: str or ~azure.mgmt.media.models.DeinterlaceParity - :param mode: The deinterlacing mode. Defaults to AutoPixelAdaptive. - Possible values include: 'Off', 'AutoPixelAdaptive' - :type mode: str or ~azure.mgmt.media.models.DeinterlaceMode - """ - - _attribute_map = { - 'parity': {'key': 'parity', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__(self, *, parity=None, mode=None, **kwargs) -> None: - super(Deinterlace, self).__init__(**kwargs) - self.parity = parity - self.mode = mode - - class EdgePolicies(Model): """EdgePolicies. @@ -2393,1889 +1795,248 @@ def __init__(self, *, enabled_protocols=None, clear_tracks=None, content_keys=No self.custom_key_acquisition_url_template = custom_key_acquisition_url_template -class FaceDetectorPreset(Preset): - """Describes all the settings to be used when analyzing a video in order to - detect (and optionally redact) all the faces present. +class ErrorAdditionalInfo(Model): + """The resource management error additional info. - All required parameters must be populated in order to send to Azure. 
+ Variables are only populated by the server, and will be ignored when + sending a request. - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param resolution: Specifies the maximum resolution at which your video is - analyzed. The default behavior is "SourceResolution," which will keep the - input video at its original resolution when analyzed. Using - "StandardDefinition" will resize input videos to standard definition while - preserving the appropriate aspect ratio. It will only resize if the video - is of higher resolution. For example, a 1920x1080 input would be scaled to - 640x360 before processing. Switching to "StandardDefinition" will reduce - the time it takes to process high resolution video. It may also reduce the - cost of using this component (see - https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics - for details). However, faces that end up being too small in the resized - video may not be detected. Possible values include: 'SourceResolution', - 'StandardDefinition' - :type resolution: str or ~azure.mgmt.media.models.AnalysisResolution - :param mode: This mode provides the ability to choose between the - following settings: 1) Analyze - For detection only.This mode generates a - metadata JSON file marking appearances of faces throughout the video.Where - possible, appearances of the same person are assigned the same ID. 2) - Combined - Additionally redacts(blurs) detected faces. 3) Redact - This - enables a 2-pass process, allowing for selective redaction of a subset of - detected faces.It takes in the metadata file from a prior analyze pass, - along with the source video, and a user-selected subset of IDs that - require redaction. Possible values include: 'Analyze', 'Redact', - 'Combined' - :type mode: str or ~azure.mgmt.media.models.FaceRedactorMode - :param blur_type: Blur type. Possible values include: 'Box', 'Low', 'Med', - 'High', 'Black' - :type blur_type: str or ~azure.mgmt.media.models.BlurType - :param experimental_options: Dictionary containing key value pairs for - parameters not exposed in the preset itself - :type experimental_options: dict[str, str] + :ivar type: The additional info type. + :vartype type: str + :ivar info: The additional info. + :vartype info: object """ _validation = { - 'odatatype': {'required': True}, + 'type': {'readonly': True}, + 'info': {'readonly': True}, } _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - 'blur_type': {'key': 'blurType', 'type': 'str'}, - 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, - } - - def __init__(self, *, resolution=None, mode=None, blur_type=None, experimental_options=None, **kwargs) -> None: - super(FaceDetectorPreset, self).__init__(**kwargs) - self.resolution = resolution - self.mode = mode - self.blur_type = blur_type - self.experimental_options = experimental_options - self.odatatype = '#Microsoft.Media.FaceDetectorPreset' - - -class Filters(Model): - """Describes all the filtering operations, such as de-interlacing, rotation - etc. that are to be applied to the input media before encoding. - - :param deinterlace: The de-interlacing settings. - :type deinterlace: ~azure.mgmt.media.models.Deinterlace - :param rotation: The rotation, if any, to be applied to the input video, - before it is encoded. Default is Auto. 
Possible values include: 'Auto', - 'None', 'Rotate0', 'Rotate90', 'Rotate180', 'Rotate270' - :type rotation: str or ~azure.mgmt.media.models.Rotation - :param crop: The parameters for the rectangular window with which to crop - the input video. - :type crop: ~azure.mgmt.media.models.Rectangle - :param overlays: The properties of overlays to be applied to the input - video. These could be audio, image or video overlays. - :type overlays: list[~azure.mgmt.media.models.Overlay] - """ - - _attribute_map = { - 'deinterlace': {'key': 'deinterlace', 'type': 'Deinterlace'}, - 'rotation': {'key': 'rotation', 'type': 'str'}, - 'crop': {'key': 'crop', 'type': 'Rectangle'}, - 'overlays': {'key': 'overlays', 'type': '[Overlay]'}, + 'type': {'key': 'type', 'type': 'str'}, + 'info': {'key': 'info', 'type': 'object'}, } - def __init__(self, *, deinterlace=None, rotation=None, crop=None, overlays=None, **kwargs) -> None: - super(Filters, self).__init__(**kwargs) - self.deinterlace = deinterlace - self.rotation = rotation - self.crop = crop - self.overlays = overlays + def __init__(self, **kwargs) -> None: + super(ErrorAdditionalInfo, self).__init__(**kwargs) + self.type = None + self.info = None -class FilterTrackPropertyCondition(Model): - """The class to specify one track property condition. +class ErrorDetail(Model): + """The error detail. - All required parameters must be populated in order to send to Azure. + Variables are only populated by the server, and will be ignored when + sending a request. - :param property: Required. The track property type. Possible values - include: 'Unknown', 'Type', 'Name', 'Language', 'FourCC', 'Bitrate' - :type property: str or ~azure.mgmt.media.models.FilterTrackPropertyType - :param value: Required. The track property value. - :type value: str - :param operation: Required. The track property condition operation. - Possible values include: 'Equal', 'NotEqual' - :type operation: str or - ~azure.mgmt.media.models.FilterTrackPropertyCompareOperation - """ - - _validation = { - 'property': {'required': True}, - 'value': {'required': True}, - 'operation': {'required': True}, - } - - _attribute_map = { - 'property': {'key': 'property', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - 'operation': {'key': 'operation', 'type': 'str'}, - } - - def __init__(self, *, property, value: str, operation, **kwargs) -> None: - super(FilterTrackPropertyCondition, self).__init__(**kwargs) - self.property = property - self.value = value - self.operation = operation - - -class FilterTrackSelection(Model): - """Representing a list of FilterTrackPropertyConditions to select a track. - The filters are combined using a logical AND operation. - - All required parameters must be populated in order to send to Azure. - - :param track_selections: Required. The track selections. - :type track_selections: - list[~azure.mgmt.media.models.FilterTrackPropertyCondition] - """ - - _validation = { - 'track_selections': {'required': True}, - } - - _attribute_map = { - 'track_selections': {'key': 'trackSelections', 'type': '[FilterTrackPropertyCondition]'}, - } - - def __init__(self, *, track_selections, **kwargs) -> None: - super(FilterTrackSelection, self).__init__(**kwargs) - self.track_selections = track_selections - - -class FirstQuality(Model): - """Filter First Quality. - - All required parameters must be populated in order to send to Azure. - - :param bitrate: Required. The first quality bitrate. 
- :type bitrate: int - """ - - _validation = { - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - } - - def __init__(self, *, bitrate: int, **kwargs) -> None: - super(FirstQuality, self).__init__(**kwargs) - self.bitrate = bitrate - - -class Format(Model): - """Base class for output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ImageFormat, MultiBitrateFormat - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'} - } - - def __init__(self, *, filename_pattern: str, **kwargs) -> None: - super(Format, self).__init__(**kwargs) - self.filename_pattern = filename_pattern - self.odatatype = None - - -class InputDefinition(Model): - """Base class for defining an input. Use sub classes of this class to specify - tracks selections and related metadata. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: FromAllInputFile, FromEachInputFile, InputFile - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. 
- :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.FromAllInputFile': 'FromAllInputFile', '#Microsoft.Media.FromEachInputFile': 'FromEachInputFile', '#Microsoft.Media.InputFile': 'InputFile'} - } - - def __init__(self, *, included_tracks=None, **kwargs) -> None: - super(InputDefinition, self).__init__(**kwargs) - self.included_tracks = included_tracks - self.odatatype = None - - -class FromAllInputFile(InputDefinition): - """An InputDefinition that looks across all of the files provided to select - tracks specified by the IncludedTracks property. Generally used with the - AudioTrackByAttribute and VideoTrackByAttribute to allow selection of a - single track across a set of input files. - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, *, included_tracks=None, **kwargs) -> None: - super(FromAllInputFile, self).__init__(included_tracks=included_tracks, **kwargs) - self.odatatype = '#Microsoft.Media.FromAllInputFile' - - -class FromEachInputFile(InputDefinition): - """An InputDefinition that looks at each input file provided to select tracks - specified by the IncludedTracks property. Generally used with the - AudioTrackByAttribute and VideoTrackByAttribute to select tracks from each - file given. - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, *, included_tracks=None, **kwargs) -> None: - super(FromEachInputFile, self).__init__(included_tracks=included_tracks, **kwargs) - self.odatatype = '#Microsoft.Media.FromEachInputFile' - - -class Layer(Model): - """The encoder can be configured to produce video and/or images (thumbnails) - at different resolutions, by specifying a layer for each desired - resolution. A layer represents the properties for the video or image at a - resolution. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H265VideoLayer, VideoLayer, JpgLayer, PngLayer - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. 
- :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H265VideoLayer': 'H265VideoLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer', '#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer'} - } - - def __init__(self, *, width: str=None, height: str=None, label: str=None, **kwargs) -> None: - super(Layer, self).__init__(**kwargs) - self.width = width - self.height = height - self.label = label - self.odatatype = None - - -class VideoLayer(Layer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H264Layer - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. This is a required - field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. If it is not specified, the encoder - will use the same frame rate as the input video. - :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. 
- :type slices: int - :param adaptive_bframe: Whether or not adaptive B-frames are to be used - when encoding this layer. If not specified, the encoder will turn it on - whenever the video profile permits its use. - :type adaptive_bframe: bool - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H264Layer': 'H264Layer'} - } - - def __init__(self, *, bitrate: int, width: str=None, height: str=None, label: str=None, max_bitrate: int=None, b_frames: int=None, frame_rate: str=None, slices: int=None, adaptive_bframe: bool=None, **kwargs) -> None: - super(VideoLayer, self).__init__(width=width, height=height, label=label, **kwargs) - self.bitrate = bitrate - self.max_bitrate = max_bitrate - self.b_frames = b_frames - self.frame_rate = frame_rate - self.slices = slices - self.adaptive_bframe = adaptive_bframe - self.odatatype = '#Microsoft.Media.VideoLayer' - - -class H264Layer(VideoLayer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer with the H.264 video codec. - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. This is a required - field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. If it is not specified, the encoder - will use the same frame rate as the input video. 
- :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. - :type slices: int - :param adaptive_bframe: Whether or not adaptive B-frames are to be used - when encoding this layer. If not specified, the encoder will turn it on - whenever the video profile permits its use. - :type adaptive_bframe: bool - :param profile: We currently support Baseline, Main, High, High422, - High444. Default is Auto. Possible values include: 'Auto', 'Baseline', - 'Main', 'High', 'High422', 'High444' - :type profile: str or ~azure.mgmt.media.models.H264VideoProfile - :param level: We currently support Level up to 6.2. The value can be Auto, - or a number that matches the H.264 profile. If not specified, the default - is Auto, which lets the encoder choose the Level that is appropriate for - this layer. - :type level: str - :param buffer_window: The VBV buffer window length. The value should be in - ISO 8601 format. The value should be in the range [0.1-100] seconds. The - default is 5 seconds (for example, PT5S). - :type buffer_window: timedelta - :param reference_frames: The number of reference frames to be used when - encoding this layer. If not specified, the encoder determines an - appropriate number based on the encoder complexity setting. - :type reference_frames: int - :param entropy_mode: The entropy mode to be used for this layer. If not - specified, the encoder chooses the mode that is appropriate for the - profile and level. Possible values include: 'Cabac', 'Cavlc' - :type entropy_mode: str or ~azure.mgmt.media.models.EntropyMode - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - 'profile': {'key': 'profile', 'type': 'str'}, - 'level': {'key': 'level', 'type': 'str'}, - 'buffer_window': {'key': 'bufferWindow', 'type': 'duration'}, - 'reference_frames': {'key': 'referenceFrames', 'type': 'int'}, - 'entropy_mode': {'key': 'entropyMode', 'type': 'str'}, - } - - def __init__(self, *, bitrate: int, width: str=None, height: str=None, label: str=None, max_bitrate: int=None, b_frames: int=None, frame_rate: str=None, slices: int=None, adaptive_bframe: bool=None, profile=None, level: str=None, buffer_window=None, reference_frames: int=None, entropy_mode=None, **kwargs) -> None: - super(H264Layer, self).__init__(width=width, height=height, label=label, bitrate=bitrate, max_bitrate=max_bitrate, b_frames=b_frames, frame_rate=frame_rate, slices=slices, adaptive_bframe=adaptive_bframe, **kwargs) - self.profile = profile - self.level = level - self.buffer_window = buffer_window - self.reference_frames = reference_frames - self.entropy_mode = entropy_mode - self.odatatype = '#Microsoft.Media.H264Layer' - - -class Video(Codec): - """Describes the basic properties for encoding the input video. - - You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: H265Video, Image, H264Video - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H265Video': 'H265Video', '#Microsoft.Media.Image': 'Image', '#Microsoft.Media.H264Video': 'H264Video'} - } - - def __init__(self, *, label: str=None, key_frame_interval=None, stretch_mode=None, sync_mode=None, **kwargs) -> None: - super(Video, self).__init__(label=label, **kwargs) - self.key_frame_interval = key_frame_interval - self.stretch_mode = stretch_mode - self.sync_mode = sync_mode - self.odatatype = '#Microsoft.Media.Video' - - -class H264Video(Video): - """Describes all the properties for encoding a video with the H.264 codec. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param scene_change_detection: Whether or not the encoder should insert - key frames at scene changes. If not specified, the default is false. This - flag should be set to true only when the encoder is being configured to - produce a single output video. 
- :type scene_change_detection: bool - :param complexity: Tells the encoder how to choose its encoding settings. - The default value is Balanced. Possible values include: 'Speed', - 'Balanced', 'Quality' - :type complexity: str or ~azure.mgmt.media.models.H264Complexity - :param layers: The collection of output H.264 layers to be produced by the - encoder. - :type layers: list[~azure.mgmt.media.models.H264Layer] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'}, - 'complexity': {'key': 'complexity', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[H264Layer]'}, - } - - def __init__(self, *, label: str=None, key_frame_interval=None, stretch_mode=None, sync_mode=None, scene_change_detection: bool=None, complexity=None, layers=None, **kwargs) -> None: - super(H264Video, self).__init__(label=label, key_frame_interval=key_frame_interval, stretch_mode=stretch_mode, sync_mode=sync_mode, **kwargs) - self.scene_change_detection = scene_change_detection - self.complexity = complexity - self.layers = layers - self.odatatype = '#Microsoft.Media.H264Video' - - -class H265VideoLayer(Layer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H265Layer - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. For example: a - target bitrate of 3000Kbps or 3Mbps means this value should be 3000000 - This is a required field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. 
If it is not specified, the encoder - will use the same frame rate as the input video. - :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. - :type slices: int - :param adaptive_bframe: Specifies whether or not adaptive B-frames are to - be used when encoding this layer. If not specified, the encoder will turn - it on whenever the video profile permits its use. - :type adaptive_bframe: bool - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.H265Layer': 'H265Layer'} - } - - def __init__(self, *, bitrate: int, width: str=None, height: str=None, label: str=None, max_bitrate: int=None, b_frames: int=None, frame_rate: str=None, slices: int=None, adaptive_bframe: bool=None, **kwargs) -> None: - super(H265VideoLayer, self).__init__(width=width, height=height, label=label, **kwargs) - self.bitrate = bitrate - self.max_bitrate = max_bitrate - self.b_frames = b_frames - self.frame_rate = frame_rate - self.slices = slices - self.adaptive_bframe = adaptive_bframe - self.odatatype = '#Microsoft.Media.H265VideoLayer' - - -class H265Layer(H265VideoLayer): - """Describes the settings to be used when encoding the input video into a - desired output bitrate layer with the H.265 video codec. - - All required parameters must be populated in order to send to Azure. - - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param bitrate: Required. The average bitrate in bits per second at which - to encode the input video when generating this layer. For example: a - target bitrate of 3000Kbps or 3Mbps means this value should be 3000000 - This is a required field. - :type bitrate: int - :param max_bitrate: The maximum bitrate (in bits per second), at which the - VBV buffer should be assumed to refill. If not specified, defaults to the - same value as bitrate. - :type max_bitrate: int - :param b_frames: The number of B-frames to be used when encoding this - layer. If not specified, the encoder chooses an appropriate number based - on the video profile and level. - :type b_frames: int - :param frame_rate: The frame rate (in frames per second) at which to - encode this layer. 
The value can be in the form of M/N where M and N are - integers (For example, 30000/1001), or in the form of a number (For - example, 30, or 29.97). The encoder enforces constraints on allowed frame - rates based on the profile and level. If it is not specified, the encoder - will use the same frame rate as the input video. - :type frame_rate: str - :param slices: The number of slices to be used when encoding this layer. - If not specified, default is zero, which means that encoder will use a - single slice for each frame. - :type slices: int - :param adaptive_bframe: Specifies whether or not adaptive B-frames are to - be used when encoding this layer. If not specified, the encoder will turn - it on whenever the video profile permits its use. - :type adaptive_bframe: bool - :param profile: We currently support Main. Default is Auto. Possible - values include: 'Auto', 'Main' - :type profile: str or ~azure.mgmt.media.models.H265VideoProfile - :param level: We currently support Level up to 6.2. The value can be Auto, - or a number that matches the H.265 profile. If not specified, the default - is Auto, which lets the encoder choose the Level that is appropriate for - this layer. - :type level: str - :param buffer_window: The VBV buffer window length. The value should be in - ISO 8601 format. The value should be in the range [0.1-100] seconds. The - default is 5 seconds (for example, PT5S). - :type buffer_window: timedelta - :param reference_frames: The number of reference frames to be used when - encoding this layer. If not specified, the encoder determines an - appropriate number based on the encoder complexity setting. - :type reference_frames: int - """ - - _validation = { - 'odatatype': {'required': True}, - 'bitrate': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'bitrate': {'key': 'bitrate', 'type': 'int'}, - 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, - 'b_frames': {'key': 'bFrames', 'type': 'int'}, - 'frame_rate': {'key': 'frameRate', 'type': 'str'}, - 'slices': {'key': 'slices', 'type': 'int'}, - 'adaptive_bframe': {'key': 'adaptiveBFrame', 'type': 'bool'}, - 'profile': {'key': 'profile', 'type': 'str'}, - 'level': {'key': 'level', 'type': 'str'}, - 'buffer_window': {'key': 'bufferWindow', 'type': 'duration'}, - 'reference_frames': {'key': 'referenceFrames', 'type': 'int'}, - } - - def __init__(self, *, bitrate: int, width: str=None, height: str=None, label: str=None, max_bitrate: int=None, b_frames: int=None, frame_rate: str=None, slices: int=None, adaptive_bframe: bool=None, profile=None, level: str=None, buffer_window=None, reference_frames: int=None, **kwargs) -> None: - super(H265Layer, self).__init__(width=width, height=height, label=label, bitrate=bitrate, max_bitrate=max_bitrate, b_frames=b_frames, frame_rate=frame_rate, slices=slices, adaptive_bframe=adaptive_bframe, **kwargs) - self.profile = profile - self.level = level - self.buffer_window = buffer_window - self.reference_frames = reference_frames - self.odatatype = '#Microsoft.Media.H265Layer' - - -class H265Video(Video): - """Describes all the properties for encoding a video with the H.265 codec. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. 
Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param scene_change_detection: Specifies whether or not the encoder should - insert key frames at scene changes. If not specified, the default is - false. This flag should be set to true only when the encoder is being - configured to produce a single output video. - :type scene_change_detection: bool - :param complexity: Tells the encoder how to choose its encoding settings. - Quality will provide for a higher compression ratio but at a higher cost - and longer compute time. Speed will produce a relatively larger file but - is faster and more economical. The default value is Balanced. Possible - values include: 'Speed', 'Balanced', 'Quality' - :type complexity: str or ~azure.mgmt.media.models.H265Complexity - :param layers: The collection of output H.265 layers to be produced by the - encoder. - :type layers: list[~azure.mgmt.media.models.H265Layer] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'}, - 'complexity': {'key': 'complexity', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[H265Layer]'}, - } - - def __init__(self, *, label: str=None, key_frame_interval=None, stretch_mode=None, sync_mode=None, scene_change_detection: bool=None, complexity=None, layers=None, **kwargs) -> None: - super(H265Video, self).__init__(label=label, key_frame_interval=key_frame_interval, stretch_mode=stretch_mode, sync_mode=sync_mode, **kwargs) - self.scene_change_detection = scene_change_detection - self.complexity = complexity - self.layers = layers - self.odatatype = '#Microsoft.Media.H265Video' - - -class Hls(Model): - """HTTP Live Streaming (HLS) packing setting for the live output. - - :param fragments_per_ts_segment: The number of fragments in an HTTP Live - Streaming (HLS) TS segment in the output of the live event. This value - does not affect the packing ratio for HLS CMAF output. - :type fragments_per_ts_segment: int - """ - - _attribute_map = { - 'fragments_per_ts_segment': {'key': 'fragmentsPerTsSegment', 'type': 'int'}, - } - - def __init__(self, *, fragments_per_ts_segment: int=None, **kwargs) -> None: - super(Hls, self).__init__(**kwargs) - self.fragments_per_ts_segment = fragments_per_ts_segment - - -class Image(Video): - """Describes the basic properties for generating thumbnails from the input - video. 
- - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JpgImage, PngImage - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param start: Required. The position in the input video from where to - start generating thumbnails. The value can be in ISO 8601 format (For - example, PT05S to start at 5 seconds), or a frame count (For example, 10 - to start at the 10th frame), or a relative value to stream duration (For - example, 10% to start at 10% of stream duration). Also supports a macro - {Best}, which tells the encoder to select the best thumbnail from the - first few seconds of the video and will only produce one thumbnail, no - matter what other settings are for Step and Range. The default value is - macro {Best}. - :type start: str - :param step: The intervals at which thumbnails are generated. The value - can be in ISO 8601 format (For example, PT05S for one image every 5 - seconds), or a frame count (For example, 30 for one image every 30 - frames), or a relative value to stream duration (For example, 10% for one - image every 10% of stream duration). Note: Step value will affect the - first generated thumbnail, which may not be exactly the one specified at - transform preset start time. This is due to the encoder, which tries to - select the best thumbnail between start time and Step position from start - time as the first output. As the default value is 10%, it means if stream - has long duration, the first generated thumbnail might be far away from - the one specified at start time. Try to select reasonable value for Step - if the first thumbnail is expected close to start time, or set Range value - at 1 if only one thumbnail is needed at start time. - :type step: str - :param range: The position relative to transform preset start time in the - input video at which to stop generating thumbnails. The value can be in - ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds - from start time), or a frame count (For example, 300 to stop at the 300th - frame from the frame at start time. If this value is 1, it means only - producing one thumbnail at start time), or a relative value to the stream - duration (For example, 50% to stop at half of stream duration from start - time). The default value is 100%, which means to stop at the end of the - stream. 
- :type range: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'start': {'required': True}, - } - - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'str'}, - 'step': {'key': 'step', 'type': 'str'}, - 'range': {'key': 'range', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JpgImage': 'JpgImage', '#Microsoft.Media.PngImage': 'PngImage'} - } - - def __init__(self, *, start: str, label: str=None, key_frame_interval=None, stretch_mode=None, sync_mode=None, step: str=None, range: str=None, **kwargs) -> None: - super(Image, self).__init__(label=label, key_frame_interval=key_frame_interval, stretch_mode=stretch_mode, sync_mode=sync_mode, **kwargs) - self.start = start - self.step = step - self.range = range - self.odatatype = '#Microsoft.Media.Image' - - -class ImageFormat(Format): - """Describes the properties for an output image file. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JpgFormat, PngFormat - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.PngFormat': 'PngFormat'} - } - - def __init__(self, *, filename_pattern: str, **kwargs) -> None: - super(ImageFormat, self).__init__(filename_pattern=filename_pattern, **kwargs) - self.odatatype = '#Microsoft.Media.ImageFormat' - - -class InputFile(InputDefinition): - """An InputDefinition for a single file. TrackSelections are scoped to the - file specified. - - All required parameters must be populated in order to send to Azure. - - :param included_tracks: The list of TrackDescriptors which define the - metadata and selection of tracks in the input. - :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] - :param odatatype: Required. Constant filled by server. 
- :type odatatype: str - :param filename: Name of the file that this input definition applies to. - :type filename: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'filename': {'key': 'filename', 'type': 'str'}, - } - - def __init__(self, *, included_tracks=None, filename: str=None, **kwargs) -> None: - super(InputFile, self).__init__(included_tracks=included_tracks, **kwargs) - self.filename = filename - self.odatatype = '#Microsoft.Media.InputFile' - - -class IPAccessControl(Model): - """The IP access control. - - :param allow: The IP allow list. - :type allow: list[~azure.mgmt.media.models.IPRange] - """ - - _attribute_map = { - 'allow': {'key': 'allow', 'type': '[IPRange]'}, - } - - def __init__(self, *, allow=None, **kwargs) -> None: - super(IPAccessControl, self).__init__(**kwargs) - self.allow = allow - - -class IPRange(Model): - """The IP address range in the CIDR scheme. - - :param name: The friendly name for the IP address range. - :type name: str - :param address: The IP address. - :type address: str - :param subnet_prefix_length: The subnet mask prefix length (see CIDR - notation). - :type subnet_prefix_length: int - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'address': {'key': 'address', 'type': 'str'}, - 'subnet_prefix_length': {'key': 'subnetPrefixLength', 'type': 'int'}, - } - - def __init__(self, *, name: str=None, address: str=None, subnet_prefix_length: int=None, **kwargs) -> None: - super(IPRange, self).__init__(**kwargs) - self.name = name - self.address = address - self.subnet_prefix_length = subnet_prefix_length - - -class Job(ProxyResource): - """A Job resource type. The progress and state can be obtained by polling a - Job or subscribing to events using EventGrid. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - :vartype id: str - :ivar name: The name of the resource - :vartype name: str - :ivar type: The type of the resource. E.g. - "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - :vartype type: str - :ivar created: The UTC date and time when the customer has created the - Job, in 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype created: datetime - :ivar state: The current state of the job. Possible values include: - 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', 'Queued', - 'Scheduled' - :vartype state: str or ~azure.mgmt.media.models.JobState - :param description: Optional customer supplied description of the Job. - :type description: str - :param input: Required. The inputs for the Job. - :type input: ~azure.mgmt.media.models.JobInput - :ivar last_modified: The UTC date and time when the customer has last - updated the Job, in 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype last_modified: datetime - :param outputs: Required. The outputs for the Job. - :type outputs: list[~azure.mgmt.media.models.JobOutput] - :param priority: Priority with which the job should be processed. Higher - priority jobs are processed before lower priority jobs. If not set, the - default is normal. 
Possible values include: 'Low', 'Normal', 'High' - :type priority: str or ~azure.mgmt.media.models.Priority - :param correlation_data: Customer provided key, value pairs that will be - returned in Job and JobOutput state events. - :type correlation_data: dict[str, str] - :ivar start_time: The UTC date and time at which this Job began - processing. - :vartype start_time: datetime - :ivar end_time: The UTC date and time at which this Job finished - processing. - :vartype end_time: datetime - :ivar system_data: The system metadata relating to this resource. - :vartype system_data: ~azure.mgmt.media.models.SystemData - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'created': {'readonly': True}, - 'state': {'readonly': True}, - 'input': {'required': True}, - 'last_modified': {'readonly': True}, - 'outputs': {'required': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'system_data': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'created': {'key': 'properties.created', 'type': 'iso-8601'}, - 'state': {'key': 'properties.state', 'type': 'str'}, - 'description': {'key': 'properties.description', 'type': 'str'}, - 'input': {'key': 'properties.input', 'type': 'JobInput'}, - 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, - 'outputs': {'key': 'properties.outputs', 'type': '[JobOutput]'}, - 'priority': {'key': 'properties.priority', 'type': 'str'}, - 'correlation_data': {'key': 'properties.correlationData', 'type': '{str}'}, - 'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'}, - 'system_data': {'key': 'systemData', 'type': 'SystemData'}, - } - - def __init__(self, *, input, outputs, description: str=None, priority=None, correlation_data=None, **kwargs) -> None: - super(Job, self).__init__(**kwargs) - self.created = None - self.state = None - self.description = description - self.input = input - self.last_modified = None - self.outputs = outputs - self.priority = priority - self.correlation_data = correlation_data - self.start_time = None - self.end_time = None - self.system_data = None - - -class JobError(Model): - """Details of JobOutput errors. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar code: Error code describing the error. Possible values include: - 'ServiceError', 'ServiceTransientError', 'DownloadNotAccessible', - 'DownloadTransientError', 'UploadNotAccessible', 'UploadTransientError', - 'ConfigurationUnsupported', 'ContentMalformed', 'ContentUnsupported' - :vartype code: str or ~azure.mgmt.media.models.JobErrorCode - :ivar message: A human-readable language-dependent representation of the - error. - :vartype message: str - :ivar category: Helps with categorization of errors. Possible values - include: 'Service', 'Download', 'Upload', 'Configuration', 'Content' - :vartype category: str or ~azure.mgmt.media.models.JobErrorCategory - :ivar retry: Indicates that it may be possible to retry the Job. If retry - is unsuccessful, please contact Azure support via Azure Portal. Possible - values include: 'DoNotRetry', 'MayRetry' - :vartype retry: str or ~azure.mgmt.media.models.JobRetry - :ivar details: An array of details about specific errors that led to this - reported error. 
- :vartype details: list[~azure.mgmt.media.models.JobErrorDetail] + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. + :vartype details: list[~azure.mgmt.media.models.ErrorDetail] + :ivar additional_info: The error additional info. + :vartype additional_info: + list[~azure.mgmt.media.models.ErrorAdditionalInfo] """ _validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, - 'category': {'readonly': True}, - 'retry': {'readonly': True}, + 'target': {'readonly': True}, 'details': {'readonly': True}, + 'additional_info': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, - 'category': {'key': 'category', 'type': 'str'}, - 'retry': {'key': 'retry', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[JobErrorDetail]'}, + 'target': {'key': 'target', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[ErrorDetail]'}, + 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__(self, **kwargs) -> None: - super(JobError, self).__init__(**kwargs) + super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message = None - self.category = None - self.retry = None + self.target = None self.details = None + self.additional_info = None -class JobErrorDetail(Model): - """Details of JobOutput errors. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar code: Code describing the error detail. - :vartype code: str - :ivar message: A human-readable representation of the error. - :vartype message: str - """ - - _validation = { - 'code': {'readonly': True}, - 'message': {'readonly': True}, - } - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, **kwargs) -> None: - super(JobErrorDetail, self).__init__(**kwargs) - self.code = None - self.message = None - - -class JobInput(Model): - """Base class for inputs to a Job. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobInputClip, JobInputs, JobInputSequence - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputs': 'JobInputs', '#Microsoft.Media.JobInputSequence': 'JobInputSequence'} - } - - def __init__(self, **kwargs) -> None: - super(JobInput, self).__init__(**kwargs) - self.odatatype = None - - -class JobInputClip(JobInput): - """Represents input files for a Job. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobInputAsset, JobInputHttp - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param files: List of files. Required for JobInputHttp. Maximum of 4000 - characters each. - :type files: list[str] - :param start: Defines a point on the timeline of the input media at which - processing will start. Defaults to the beginning of the input media. 
- :type start: ~azure.mgmt.media.models.ClipTime - :param end: Defines a point on the timeline of the input media at which - processing will end. Defaults to the end of the input media. - :type end: ~azure.mgmt.media.models.ClipTime - :param label: A label that is assigned to a JobInputClip, that is used to - satisfy a reference used in the Transform. For example, a Transform can be - authored so as to take an image file with the label 'xyz' and apply it as - an overlay onto the input video before it is encoded. When submitting a - Job, exactly one of the JobInputs should be the image file, and it should - have the label 'xyz'. - :type label: str - :param input_definitions: Defines a list of InputDefinitions. For each - InputDefinition, it defines a list of track selections and related - metadata. - :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'files': {'key': 'files', 'type': '[str]'}, - 'start': {'key': 'start', 'type': 'ClipTime'}, - 'end': {'key': 'end', 'type': 'ClipTime'}, - 'label': {'key': 'label', 'type': 'str'}, - 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JobInputAsset': 'JobInputAsset', '#Microsoft.Media.JobInputHttp': 'JobInputHttp'} - } - - def __init__(self, *, files=None, start=None, end=None, label: str=None, input_definitions=None, **kwargs) -> None: - super(JobInputClip, self).__init__(**kwargs) - self.files = files - self.start = start - self.end = end - self.label = label - self.input_definitions = input_definitions - self.odatatype = '#Microsoft.Media.JobInputClip' - - -class JobInputAsset(JobInputClip): - """Represents an Asset for input into a Job. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param files: List of files. Required for JobInputHttp. Maximum of 4000 - characters each. - :type files: list[str] - :param start: Defines a point on the timeline of the input media at which - processing will start. Defaults to the beginning of the input media. - :type start: ~azure.mgmt.media.models.ClipTime - :param end: Defines a point on the timeline of the input media at which - processing will end. Defaults to the end of the input media. - :type end: ~azure.mgmt.media.models.ClipTime - :param label: A label that is assigned to a JobInputClip, that is used to - satisfy a reference used in the Transform. For example, a Transform can be - authored so as to take an image file with the label 'xyz' and apply it as - an overlay onto the input video before it is encoded. When submitting a - Job, exactly one of the JobInputs should be the image file, and it should - have the label 'xyz'. - :type label: str - :param input_definitions: Defines a list of InputDefinitions. For each - InputDefinition, it defines a list of track selections and related - metadata. - :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] - :param asset_name: Required. The name of the input Asset. 
- :type asset_name: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'asset_name': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'files': {'key': 'files', 'type': '[str]'}, - 'start': {'key': 'start', 'type': 'ClipTime'}, - 'end': {'key': 'end', 'type': 'ClipTime'}, - 'label': {'key': 'label', 'type': 'str'}, - 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, - 'asset_name': {'key': 'assetName', 'type': 'str'}, - } - - def __init__(self, *, asset_name: str, files=None, start=None, end=None, label: str=None, input_definitions=None, **kwargs) -> None: - super(JobInputAsset, self).__init__(files=files, start=start, end=end, label=label, input_definitions=input_definitions, **kwargs) - self.asset_name = asset_name - self.odatatype = '#Microsoft.Media.JobInputAsset' - - -class JobInputHttp(JobInputClip): - """Represents HTTPS job input. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param files: List of files. Required for JobInputHttp. Maximum of 4000 - characters each. - :type files: list[str] - :param start: Defines a point on the timeline of the input media at which - processing will start. Defaults to the beginning of the input media. - :type start: ~azure.mgmt.media.models.ClipTime - :param end: Defines a point on the timeline of the input media at which - processing will end. Defaults to the end of the input media. - :type end: ~azure.mgmt.media.models.ClipTime - :param label: A label that is assigned to a JobInputClip, that is used to - satisfy a reference used in the Transform. For example, a Transform can be - authored so as to take an image file with the label 'xyz' and apply it as - an overlay onto the input video before it is encoded. When submitting a - Job, exactly one of the JobInputs should be the image file, and it should - have the label 'xyz'. - :type label: str - :param input_definitions: Defines a list of InputDefinitions. For each - InputDefinition, it defines a list of track selections and related - metadata. - :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] - :param base_uri: Base URI for HTTPS job input. It will be concatenated - with provided file names. If no base uri is given, then the provided file - list is assumed to be fully qualified uris. Maximum length of 4000 - characters. - :type base_uri: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'files': {'key': 'files', 'type': '[str]'}, - 'start': {'key': 'start', 'type': 'ClipTime'}, - 'end': {'key': 'end', 'type': 'ClipTime'}, - 'label': {'key': 'label', 'type': 'str'}, - 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, - 'base_uri': {'key': 'baseUri', 'type': 'str'}, - } - - def __init__(self, *, files=None, start=None, end=None, label: str=None, input_definitions=None, base_uri: str=None, **kwargs) -> None: - super(JobInputHttp, self).__init__(files=files, start=start, end=end, label=label, input_definitions=input_definitions, **kwargs) - self.base_uri = base_uri - self.odatatype = '#Microsoft.Media.JobInputHttp' - - -class JobInputs(JobInput): - """Describes a list of inputs to a Job. +class ErrorResponse(Model): + """Error response. - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. 
Constant filled by server. - :type odatatype: str - :param inputs: List of inputs to a Job. - :type inputs: list[~azure.mgmt.media.models.JobInput] - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[JobInput]'}, - } - - def __init__(self, *, inputs=None, **kwargs) -> None: - super(JobInputs, self).__init__(**kwargs) - self.inputs = inputs - self.odatatype = '#Microsoft.Media.JobInputs' - - -class JobInputSequence(JobInput): - """A Sequence contains an ordered list of Clips where each clip is a JobInput. - The Sequence will be treated as a single input. + Common error response for all Azure Resource Manager APIs to return error + details for failed operations. (This also follows the OData error response + format.). - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param inputs: JobInputs that make up the timeline. - :type inputs: list[~azure.mgmt.media.models.JobInputClip] + :param error: The error object. + :type error: ~azure.mgmt.media.models.ErrorDetail """ - _validation = { - 'odatatype': {'required': True}, - } - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[JobInputClip]'}, + 'error': {'key': 'error', 'type': 'ErrorDetail'}, } - def __init__(self, *, inputs=None, **kwargs) -> None: - super(JobInputSequence, self).__init__(**kwargs) - self.inputs = inputs - self.odatatype = '#Microsoft.Media.JobInputSequence' - - -class JobOutput(Model): - """Describes all the properties of a JobOutput. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobOutputAsset + def __init__(self, *, error=None, **kwargs) -> None: + super(ErrorResponse, self).__init__(**kwargs) + self.error = error - Variables are only populated by the server, and will be ignored when - sending a request. - All required parameters must be populated in order to send to Azure. +class ErrorResponseException(HttpOperationError): + """Server responsed with exception of type: 'ErrorResponse'. - :ivar error: If the JobOutput is in the Error state, it contains the - details of the error. - :vartype error: ~azure.mgmt.media.models.JobError - :ivar state: Describes the state of the JobOutput. Possible values - include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', - 'Queued', 'Scheduled' - :vartype state: str or ~azure.mgmt.media.models.JobState - :ivar progress: If the JobOutput is in a Processing state, this contains - the Job completion percentage. The value is an estimate and not intended - to be used to predict Job completion times. To determine if the JobOutput - is complete, use the State property. - :vartype progress: int - :param label: A label that is assigned to a JobOutput in order to help - uniquely identify it. This is useful when your Transform has more than one - TransformOutput, whereby your Job has more than one JobOutput. In such - cases, when you submit the Job, you will add two or more JobOutputs, in - the same order as TransformOutputs in the Transform. Subsequently, when - you retrieve the Job, either through events or on a GET request, you can - use the label to easily identify the JobOutput. 
If a label is not - provided, a default value of '{presetName}_{outputIndex}' will be used, - where the preset name is the name of the preset in the corresponding - TransformOutput and the output index is the relative index of the this - JobOutput within the Job. Note that this index is the same as the relative - index of the corresponding TransformOutput within its Transform. - :type label: str - :ivar start_time: The UTC date and time at which this Job Output began - processing. - :vartype start_time: datetime - :ivar end_time: The UTC date and time at which this Job Output finished - processing. - :vartype end_time: datetime - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :param deserialize: A deserializer + :param response: Server response to be deserialized. """ - _validation = { - 'error': {'readonly': True}, - 'state': {'readonly': True}, - 'progress': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'error': {'key': 'error', 'type': 'JobError'}, - 'state': {'key': 'state', 'type': 'str'}, - 'progress': {'key': 'progress', 'type': 'int'}, - 'label': {'key': 'label', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.JobOutputAsset': 'JobOutputAsset'} - } - - def __init__(self, *, label: str=None, **kwargs) -> None: - super(JobOutput, self).__init__(**kwargs) - self.error = None - self.state = None - self.progress = None - self.label = label - self.start_time = None - self.end_time = None - self.odatatype = None + def __init__(self, deserialize, response, *args): + super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args) -class JobOutputAsset(JobOutput): - """Represents an Asset used as a JobOutput. - Variables are only populated by the server, and will be ignored when - sending a request. +class FilterTrackPropertyCondition(Model): + """The class to specify one track property condition. All required parameters must be populated in order to send to Azure. - :ivar error: If the JobOutput is in the Error state, it contains the - details of the error. - :vartype error: ~azure.mgmt.media.models.JobError - :ivar state: Describes the state of the JobOutput. Possible values - include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', - 'Queued', 'Scheduled' - :vartype state: str or ~azure.mgmt.media.models.JobState - :ivar progress: If the JobOutput is in a Processing state, this contains - the Job completion percentage. The value is an estimate and not intended - to be used to predict Job completion times. To determine if the JobOutput - is complete, use the State property. - :vartype progress: int - :param label: A label that is assigned to a JobOutput in order to help - uniquely identify it. This is useful when your Transform has more than one - TransformOutput, whereby your Job has more than one JobOutput. In such - cases, when you submit the Job, you will add two or more JobOutputs, in - the same order as TransformOutputs in the Transform. Subsequently, when - you retrieve the Job, either through events or on a GET request, you can - use the label to easily identify the JobOutput. 
If a label is not - provided, a default value of '{presetName}_{outputIndex}' will be used, - where the preset name is the name of the preset in the corresponding - TransformOutput and the output index is the relative index of the this - JobOutput within the Job. Note that this index is the same as the relative - index of the corresponding TransformOutput within its Transform. - :type label: str - :ivar start_time: The UTC date and time at which this Job Output began - processing. - :vartype start_time: datetime - :ivar end_time: The UTC date and time at which this Job Output finished - processing. - :vartype end_time: datetime - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param asset_name: Required. The name of the output Asset. - :type asset_name: str + :param property: Required. The track property type. Possible values + include: 'Unknown', 'Type', 'Name', 'Language', 'FourCC', 'Bitrate' + :type property: str or ~azure.mgmt.media.models.FilterTrackPropertyType + :param value: Required. The track property value. + :type value: str + :param operation: Required. The track property condition operation. + Possible values include: 'Equal', 'NotEqual' + :type operation: str or + ~azure.mgmt.media.models.FilterTrackPropertyCompareOperation """ _validation = { - 'error': {'readonly': True}, - 'state': {'readonly': True}, - 'progress': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'odatatype': {'required': True}, - 'asset_name': {'required': True}, + 'property': {'required': True}, + 'value': {'required': True}, + 'operation': {'required': True}, } _attribute_map = { - 'error': {'key': 'error', 'type': 'JobError'}, - 'state': {'key': 'state', 'type': 'str'}, - 'progress': {'key': 'progress', 'type': 'int'}, - 'label': {'key': 'label', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'asset_name': {'key': 'assetName', 'type': 'str'}, + 'property': {'key': 'property', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + 'operation': {'key': 'operation', 'type': 'str'}, } - def __init__(self, *, asset_name: str, label: str=None, **kwargs) -> None: - super(JobOutputAsset, self).__init__(label=label, **kwargs) - self.asset_name = asset_name - self.odatatype = '#Microsoft.Media.JobOutputAsset' + def __init__(self, *, property, value: str, operation, **kwargs) -> None: + super(FilterTrackPropertyCondition, self).__init__(**kwargs) + self.property = property + self.value = value + self.operation = operation -class JpgFormat(ImageFormat): - """Describes the settings for producing JPEG thumbnails. +class FilterTrackSelection(Model): + """Representing a list of FilterTrackPropertyConditions to select a track. + The filters are combined using a logical AND operation. All required parameters must be populated in order to send to Azure. - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. 
{Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :param track_selections: Required. The track selections. + :type track_selections: + list[~azure.mgmt.media.models.FilterTrackPropertyCondition] """ _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, + 'track_selections': {'required': True}, } _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + 'track_selections': {'key': 'trackSelections', 'type': '[FilterTrackPropertyCondition]'}, } - def __init__(self, *, filename_pattern: str, **kwargs) -> None: - super(JpgFormat, self).__init__(filename_pattern=filename_pattern, **kwargs) - self.odatatype = '#Microsoft.Media.JpgFormat' + def __init__(self, *, track_selections, **kwargs) -> None: + super(FilterTrackSelection, self).__init__(**kwargs) + self.track_selections = track_selections -class JpgImage(Image): - """Describes the properties for producing a series of JPEG images from the - input video. +class FirstQuality(Model): + """Filter First Quality. All required parameters must be populated in order to send to Azure. - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param start: Required. The position in the input video from where to - start generating thumbnails. The value can be in ISO 8601 format (For - example, PT05S to start at 5 seconds), or a frame count (For example, 10 - to start at the 10th frame), or a relative value to stream duration (For - example, 10% to start at 10% of stream duration). Also supports a macro - {Best}, which tells the encoder to select the best thumbnail from the - first few seconds of the video and will only produce one thumbnail, no - matter what other settings are for Step and Range. The default value is - macro {Best}. - :type start: str - :param step: The intervals at which thumbnails are generated. 
The value - can be in ISO 8601 format (For example, PT05S for one image every 5 - seconds), or a frame count (For example, 30 for one image every 30 - frames), or a relative value to stream duration (For example, 10% for one - image every 10% of stream duration). Note: Step value will affect the - first generated thumbnail, which may not be exactly the one specified at - transform preset start time. This is due to the encoder, which tries to - select the best thumbnail between start time and Step position from start - time as the first output. As the default value is 10%, it means if stream - has long duration, the first generated thumbnail might be far away from - the one specified at start time. Try to select reasonable value for Step - if the first thumbnail is expected close to start time, or set Range value - at 1 if only one thumbnail is needed at start time. - :type step: str - :param range: The position relative to transform preset start time in the - input video at which to stop generating thumbnails. The value can be in - ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds - from start time), or a frame count (For example, 300 to stop at the 300th - frame from the frame at start time. If this value is 1, it means only - producing one thumbnail at start time), or a relative value to the stream - duration (For example, 50% to stop at half of stream duration from start - time). The default value is 100%, which means to stop at the end of the - stream. - :type range: str - :param layers: A collection of output JPEG image layers to be produced by - the encoder. - :type layers: list[~azure.mgmt.media.models.JpgLayer] - :param sprite_column: Sets the number of columns used in thumbnail sprite - image. The number of rows are automatically calculated and a VTT file is - generated with the coordinate mappings for each thumbnail in the sprite. - Note: this value should be a positive integer and a proper value is - recommended so that the output image resolution will not go beyond JPEG - maximum pixel resolution limit 65535x65535. - :type sprite_column: int + :param bitrate: Required. The first quality bitrate. 
+ :type bitrate: int """ _validation = { - 'odatatype': {'required': True}, - 'start': {'required': True}, + 'bitrate': {'required': True}, } _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'str'}, - 'step': {'key': 'step', 'type': 'str'}, - 'range': {'key': 'range', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[JpgLayer]'}, - 'sprite_column': {'key': 'spriteColumn', 'type': 'int'}, + 'bitrate': {'key': 'bitrate', 'type': 'int'}, } - def __init__(self, *, start: str, label: str=None, key_frame_interval=None, stretch_mode=None, sync_mode=None, step: str=None, range: str=None, layers=None, sprite_column: int=None, **kwargs) -> None: - super(JpgImage, self).__init__(label=label, key_frame_interval=key_frame_interval, stretch_mode=stretch_mode, sync_mode=sync_mode, start=start, step=step, range=range, **kwargs) - self.layers = layers - self.sprite_column = sprite_column - self.odatatype = '#Microsoft.Media.JpgImage' + def __init__(self, *, bitrate: int, **kwargs) -> None: + super(FirstQuality, self).__init__(**kwargs) + self.bitrate = bitrate -class JpgLayer(Layer): - """Describes the settings to produce a JPEG image from the input video. +class Hls(Model): + """HTTP Live Streaming (HLS) packing setting for the live output. - All required parameters must be populated in order to send to Azure. + :param fragments_per_ts_segment: The number of fragments in an HTTP Live + Streaming (HLS) TS segment in the output of the live event. This value + does not affect the packing ratio for HLS CMAF output. + :type fragments_per_ts_segment: int + """ - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param quality: The compression quality of the JPEG output. Range is from - 0-100 and the default is 70. - :type quality: int + _attribute_map = { + 'fragments_per_ts_segment': {'key': 'fragmentsPerTsSegment', 'type': 'int'}, + } + + def __init__(self, *, fragments_per_ts_segment: int=None, **kwargs) -> None: + super(Hls, self).__init__(**kwargs) + self.fragments_per_ts_segment = fragments_per_ts_segment + + +class IPAccessControl(Model): + """The IP access control. + + :param allow: The IP allow list. + :type allow: list[~azure.mgmt.media.models.IPRange] """ - _validation = { - 'odatatype': {'required': True}, + _attribute_map = { + 'allow': {'key': 'allow', 'type': '[IPRange]'}, } + def __init__(self, *, allow=None, **kwargs) -> None: + super(IPAccessControl, self).__init__(**kwargs) + self.allow = allow + + +class IPRange(Model): + """The IP address range in the CIDR scheme. + + :param name: The friendly name for the IP address range. 
+ :type name: str + :param address: The IP address. + :type address: str + :param subnet_prefix_length: The subnet mask prefix length (see CIDR + notation). + :type subnet_prefix_length: int + """ + _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'int'}, + 'name': {'key': 'name', 'type': 'str'}, + 'address': {'key': 'address', 'type': 'str'}, + 'subnet_prefix_length': {'key': 'subnetPrefixLength', 'type': 'int'}, } - def __init__(self, *, width: str=None, height: str=None, label: str=None, quality: int=None, **kwargs) -> None: - super(JpgLayer, self).__init__(width=width, height=height, label=label, **kwargs) - self.quality = quality - self.odatatype = '#Microsoft.Media.JpgLayer' + def __init__(self, *, name: str=None, address: str=None, subnet_prefix_length: int=None, **kwargs) -> None: + super(IPRange, self).__init__(**kwargs) + self.name = name + self.address = address + self.subnet_prefix_length = subnet_prefix_length class KeyDelivery(Model): @@ -4626,7 +2387,8 @@ class LiveEventEncoding(Model): encoder transcodes the incoming stream into multiple bitrates or layers. See https://go.microsoft.com/fwlink/?linkid=2095101 for more information. This property cannot be modified after the live event is created. Possible - values include: 'None', 'Standard', 'Premium1080p' + values include: 'None', 'Standard', 'Premium1080p', 'PassthroughBasic', + 'PassthroughStandard' :type encoding_type: str or ~azure.mgmt.media.models.LiveEventEncodingType :param preset_name: The optional encoding preset name, used when encodingType is not None. This value is specified at creation time and @@ -4941,6 +2703,8 @@ class LiveOutput(ProxyResource): values include: 'Creating', 'Running', 'Deleting' :vartype resource_state: str or ~azure.mgmt.media.models.LiveOutputResourceState + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData """ _validation = { @@ -4953,6 +2717,7 @@ class LiveOutput(ProxyResource): 'last_modified': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'resource_state': {'readonly': True}, + 'system_data': {'readonly': True}, } _attribute_map = { @@ -4969,6 +2734,7 @@ class LiveOutput(ProxyResource): 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_state': {'key': 'properties.resourceState', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__(self, *, asset_name: str, archive_window_length, description: str=None, manifest_name: str=None, hls=None, output_snap_time: int=None, **kwargs) -> None: @@ -4983,28 +2749,7 @@ def __init__(self, *, asset_name: str, archive_window_length, description: str=N self.last_modified = None self.provisioning_state = None self.resource_state = None - - -class Location(Model): - """Location. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. 
- :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - } - - def __init__(self, *, name: str, **kwargs) -> None: - super(Location, self).__init__(**kwargs) - self.name = name + self.system_data = None class LogSpecification(Model): @@ -5073,6 +2818,11 @@ class MediaService(TrackedResource): :param key_delivery: The Key Delivery properties for Media Services account. :type key_delivery: ~azure.mgmt.media.models.KeyDelivery + :param public_network_access: Whether or not public network access is + allowed for resources under the Media Services account. Possible values + include: 'Enabled', 'Disabled' + :type public_network_access: str or + ~azure.mgmt.media.models.PublicNetworkAccess :param identity: The Managed Identity for the Media Services account. :type identity: ~azure.mgmt.media.models.MediaServiceIdentity :ivar system_data: The system metadata relating to this resource. @@ -5099,17 +2849,19 @@ class MediaService(TrackedResource): 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, + 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } - def __init__(self, *, location: str, tags=None, storage_accounts=None, storage_authentication=None, encryption=None, key_delivery=None, identity=None, **kwargs) -> None: + def __init__(self, *, location: str, tags=None, storage_accounts=None, storage_authentication=None, encryption=None, key_delivery=None, public_network_access=None, identity=None, **kwargs) -> None: super(MediaService, self).__init__(tags=tags, location=location, **kwargs) self.media_service_id = None self.storage_accounts = storage_accounts self.storage_authentication = storage_authentication self.encryption = encryption self.key_delivery = key_delivery + self.public_network_access = public_network_access self.identity = identity self.system_data = None @@ -5122,13 +2874,15 @@ class MediaServiceIdentity(Model): All required parameters must be populated in order to send to Azure. - :param type: Required. The identity type. Possible values include: - 'SystemAssigned', 'None' - :type type: str or ~azure.mgmt.media.models.ManagedIdentityType + :param type: Required. The identity type. + :type type: str :ivar principal_id: The Principal ID of the identity. :vartype principal_id: str :ivar tenant_id: The Tenant ID of the identity. :vartype tenant_id: str + :param user_assigned_identities: The user assigned managed identities. 
+ :type user_assigned_identities: dict[str, + ~azure.mgmt.media.models.UserAssignedManagedIdentity] """ _validation = { @@ -5141,13 +2895,15 @@ class MediaServiceIdentity(Model): 'type': {'key': 'type', 'type': 'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'}, 'tenant_id': {'key': 'tenantId', 'type': 'str'}, + 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } - def __init__(self, *, type, **kwargs) -> None: + def __init__(self, *, type: str, user_assigned_identities=None, **kwargs) -> None: super(MediaServiceIdentity, self).__init__(**kwargs) self.type = type self.principal_id = None self.tenant_id = None + self.user_assigned_identities = user_assigned_identities class MediaServiceUpdate(Model): @@ -5171,6 +2927,11 @@ class MediaServiceUpdate(Model): :param key_delivery: The Key Delivery properties for Media Services account. :type key_delivery: ~azure.mgmt.media.models.KeyDelivery + :param public_network_access: Whether or not public network access is + allowed for resources under the Media Services account. Possible values + include: 'Enabled', 'Disabled' + :type public_network_access: str or + ~azure.mgmt.media.models.PublicNetworkAccess :param identity: The Managed Identity for the Media Services account. :type identity: ~azure.mgmt.media.models.MediaServiceIdentity """ @@ -5186,10 +2947,11 @@ class MediaServiceUpdate(Model): 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, + 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, } - def __init__(self, *, tags=None, storage_accounts=None, storage_authentication=None, encryption=None, key_delivery=None, identity=None, **kwargs) -> None: + def __init__(self, *, tags=None, storage_accounts=None, storage_authentication=None, encryption=None, key_delivery=None, public_network_access=None, identity=None, **kwargs) -> None: super(MediaServiceUpdate, self).__init__(**kwargs) self.tags = tags self.media_service_id = None @@ -5197,6 +2959,7 @@ def __init__(self, *, tags=None, storage_accounts=None, storage_authentication=N self.storage_authentication = storage_authentication self.encryption = encryption self.key_delivery = key_delivery + self.public_network_access = public_network_access self.identity = identity @@ -5316,102 +3079,6 @@ def __init__(self, *, supported_aggregation_types=None, **kwargs) -> None: self.supported_time_grain_types = None -class MultiBitrateFormat(Format): - """Describes the properties for producing a collection of GOP aligned - multi-bitrate files. The default behavior is to produce one output file for - each video layer which is muxed together with all the audios. The exact - output files produced can be controlled by specifying the outputFiles - collection. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: Mp4Format, TransportStreamFormat - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. 
If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param output_files: The list of output files to produce. Each entry in - the list is a set of audio and video layer labels to be muxed together . - :type output_files: list[~azure.mgmt.media.models.OutputFile] - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.Mp4Format': 'Mp4Format', '#Microsoft.Media.TransportStreamFormat': 'TransportStreamFormat'} - } - - def __init__(self, *, filename_pattern: str, output_files=None, **kwargs) -> None: - super(MultiBitrateFormat, self).__init__(filename_pattern=filename_pattern, **kwargs) - self.output_files = output_files - self.odatatype = '#Microsoft.Media.MultiBitrateFormat' - - -class Mp4Format(MultiBitrateFormat): - """Describes the properties for an output ISO MP4 file. - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param output_files: The list of output files to produce. Each entry in - the list is a set of audio and video layer labels to be muxed together . 
- :type output_files: list[~azure.mgmt.media.models.OutputFile] - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - } - - def __init__(self, *, filename_pattern: str, output_files=None, **kwargs) -> None: - super(Mp4Format, self).__init__(filename_pattern=filename_pattern, output_files=output_files, **kwargs) - self.odatatype = '#Microsoft.Media.Mp4Format' - - class NoEncryption(Model): """Class for NoEncryption scheme. @@ -5428,35 +3095,6 @@ def __init__(self, *, enabled_protocols=None, **kwargs) -> None: self.enabled_protocols = enabled_protocols -class ODataError(Model): - """Information about an error. - - :param code: A language-independent error name. - :type code: str - :param message: The error message. - :type message: str - :param target: The target of the error (for example, the name of the - property in error). - :type target: str - :param details: The error details. - :type details: list[~azure.mgmt.media.models.ODataError] - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'target': {'key': 'target', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[ODataError]'}, - } - - def __init__(self, *, code: str=None, message: str=None, target: str=None, details=None, **kwargs) -> None: - super(ODataError, self).__init__(**kwargs) - self.code = code - self.message = message - self.target = target - self.details = details - - class Operation(Model): """An operation. @@ -5500,220 +3138,48 @@ def __init__(self, *, name: str, display=None, origin: str=None, properties=None self.action_type = action_type -class OperationDisplay(Model): - """Operation details. - - :param provider: The service provider. - :type provider: str - :param resource: Resource on which the operation is performed. - :type resource: str - :param operation: The operation type. - :type operation: str - :param description: The operation description. - :type description: str - """ - - _attribute_map = { - 'provider': {'key': 'provider', 'type': 'str'}, - 'resource': {'key': 'resource', 'type': 'str'}, - 'operation': {'key': 'operation', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - } - - def __init__(self, *, provider: str=None, resource: str=None, operation: str=None, description: str=None, **kwargs) -> None: - super(OperationDisplay, self).__init__(**kwargs) - self.provider = provider - self.resource = resource - self.operation = operation - self.description = description - - -class OutputFile(Model): - """Represents an output file produced. - - All required parameters must be populated in order to send to Azure. - - :param labels: Required. The list of labels that describe how the encoder - should multiplex video and audio into an output file. For example, if the - encoder is producing two video layers with labels v1 and v2, and one audio - layer with label a1, then an array like '[v1, a1]' tells the encoder to - produce an output file with the video track represented by v1 and the - audio track represented by a1. 
- :type labels: list[str] - """ - - _validation = { - 'labels': {'required': True}, - } - - _attribute_map = { - 'labels': {'key': 'labels', 'type': '[str]'}, - } - - def __init__(self, *, labels, **kwargs) -> None: - super(OutputFile, self).__init__(**kwargs) - self.labels = labels - - -class PngFormat(ImageFormat): - """Describes the settings for producing PNG thumbnails. - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - def __init__(self, *, filename_pattern: str, **kwargs) -> None: - super(PngFormat, self).__init__(filename_pattern=filename_pattern, **kwargs) - self.odatatype = '#Microsoft.Media.PngFormat' - +class OperationCollection(Model): + """A collection of Operation items. -class PngImage(Image): - """Describes the properties for producing a series of PNG images from the - input video. - - All required parameters must be populated in order to send to Azure. - - :param label: An optional label for the codec. The label can be used to - control muxing behavior. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param key_frame_interval: The distance between two key frames. The value - should be non-zero in the range [0.5, 20] seconds, specified in ISO 8601 - format. The default is 2 seconds(PT2S). Note that this setting is ignored - if VideoSyncMode.Passthrough is set, where the KeyFrameInterval value will - follow the input source setting. - :type key_frame_interval: timedelta - :param stretch_mode: The resizing mode - how the input video will be - resized to fit the desired output resolution(s). Default is AutoSize. - Possible values include: 'None', 'AutoSize', 'AutoFit' - :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode - :param sync_mode: The Video Sync Mode. Possible values include: 'Auto', - 'Passthrough', 'Cfr', 'Vfr' - :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode - :param start: Required. The position in the input video from where to - start generating thumbnails. 
The value can be in ISO 8601 format (For - example, PT05S to start at 5 seconds), or a frame count (For example, 10 - to start at the 10th frame), or a relative value to stream duration (For - example, 10% to start at 10% of stream duration). Also supports a macro - {Best}, which tells the encoder to select the best thumbnail from the - first few seconds of the video and will only produce one thumbnail, no - matter what other settings are for Step and Range. The default value is - macro {Best}. - :type start: str - :param step: The intervals at which thumbnails are generated. The value - can be in ISO 8601 format (For example, PT05S for one image every 5 - seconds), or a frame count (For example, 30 for one image every 30 - frames), or a relative value to stream duration (For example, 10% for one - image every 10% of stream duration). Note: Step value will affect the - first generated thumbnail, which may not be exactly the one specified at - transform preset start time. This is due to the encoder, which tries to - select the best thumbnail between start time and Step position from start - time as the first output. As the default value is 10%, it means if stream - has long duration, the first generated thumbnail might be far away from - the one specified at start time. Try to select reasonable value for Step - if the first thumbnail is expected close to start time, or set Range value - at 1 if only one thumbnail is needed at start time. - :type step: str - :param range: The position relative to transform preset start time in the - input video at which to stop generating thumbnails. The value can be in - ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds - from start time), or a frame count (For example, 300 to stop at the 300th - frame from the frame at start time. If this value is 1, it means only - producing one thumbnail at start time), or a relative value to the stream - duration (For example, 50% to stop at half of stream duration from start - time). The default value is 100%, which means to stop at the end of the - stream. - :type range: str - :param layers: A collection of output PNG image layers to be produced by - the encoder. - :type layers: list[~azure.mgmt.media.models.PngLayer] + :param value: A collection of Operation items. 
+ :type value: list[~azure.mgmt.media.models.Operation] """ - _validation = { - 'odatatype': {'required': True}, - 'start': {'required': True}, - } - _attribute_map = { - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, - 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, - 'sync_mode': {'key': 'syncMode', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'str'}, - 'step': {'key': 'step', 'type': 'str'}, - 'range': {'key': 'range', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[PngLayer]'}, + 'value': {'key': 'value', 'type': '[Operation]'}, } - def __init__(self, *, start: str, label: str=None, key_frame_interval=None, stretch_mode=None, sync_mode=None, step: str=None, range: str=None, layers=None, **kwargs) -> None: - super(PngImage, self).__init__(label=label, key_frame_interval=key_frame_interval, stretch_mode=stretch_mode, sync_mode=sync_mode, start=start, step=step, range=range, **kwargs) - self.layers = layers - self.odatatype = '#Microsoft.Media.PngImage' - + def __init__(self, *, value=None, **kwargs) -> None: + super(OperationCollection, self).__init__(**kwargs) + self.value = value -class PngLayer(Layer): - """Describes the settings to produce a PNG image from the input video. - All required parameters must be populated in order to send to Azure. +class OperationDisplay(Model): + """Operation details. - :param width: The width of the output video for this layer. The value can - be absolute (in pixels) or relative (in percentage). For example 50% means - the output video has half as many pixels in width as the input. - :type width: str - :param height: The height of the output video for this layer. The value - can be absolute (in pixels) or relative (in percentage). For example 50% - means the output video has half as many pixels in height as the input. - :type height: str - :param label: The alphanumeric label for this layer, which can be used in - multiplexing different video and audio layers, or in naming the output - file. - :type label: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str + :param provider: The service provider. + :type provider: str + :param resource: Resource on which the operation is performed. + :type resource: str + :param operation: The operation type. + :type operation: str + :param description: The operation description. 
+ :type description: str """ - _validation = { - 'odatatype': {'required': True}, - } - _attribute_map = { - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - 'label': {'key': 'label', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'resource': {'key': 'resource', 'type': 'str'}, + 'operation': {'key': 'operation', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, } - def __init__(self, *, width: str=None, height: str=None, label: str=None, **kwargs) -> None: - super(PngLayer, self).__init__(width=width, height=height, label=label, **kwargs) - self.odatatype = '#Microsoft.Media.PngLayer' + def __init__(self, *, provider: str=None, resource: str=None, operation: str=None, description: str=None, **kwargs) -> None: + super(OperationDisplay, self).__init__(**kwargs) + self.provider = provider + self.resource = resource + self.operation = operation + self.description = description class PresentationTimeRange(Model): @@ -5963,245 +3429,33 @@ def __init__(self, **kwargs) -> None: self.service_specification = None -class Provider(Model): - """A resource provider. - - All required parameters must be populated in order to send to Azure. - - :param provider_name: Required. The provider name. - :type provider_name: str - """ - - _validation = { - 'provider_name': {'required': True}, - } - - _attribute_map = { - 'provider_name': {'key': 'providerName', 'type': 'str'}, - } - - def __init__(self, *, provider_name: str, **kwargs) -> None: - super(Provider, self).__init__(**kwargs) - self.provider_name = provider_name - - -class Rectangle(Model): - """Describes the properties of a rectangular window applied to the input media - before processing it. - - :param left: The number of pixels from the left-margin. This can be - absolute pixel value (e.g 100), or relative to the size of the video (For - example, 50%). - :type left: str - :param top: The number of pixels from the top-margin. This can be absolute - pixel value (e.g 100), or relative to the size of the video (For example, - 50%). - :type top: str - :param width: The width of the rectangular region in pixels. This can be - absolute pixel value (e.g 100), or relative to the size of the video (For - example, 50%). - :type width: str - :param height: The height of the rectangular region in pixels. This can be - absolute pixel value (e.g 100), or relative to the size of the video (For - example, 50%). - :type height: str - """ - - _attribute_map = { - 'left': {'key': 'left', 'type': 'str'}, - 'top': {'key': 'top', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__(self, *, left: str=None, top: str=None, width: str=None, height: str=None, **kwargs) -> None: - super(Rectangle, self).__init__(**kwargs) - self.left = left - self.top = top - self.width = width - self.height = height - - -class SelectAudioTrackByAttribute(AudioTrackDescriptor): - """Select audio tracks from the input by specifying an attribute and an - attribute filter. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channel_mapping: Optional designation for single channel audio - tracks. Can be used to combine the tracks into stereo or multi-channel - audio tracks. 
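# Illustrative sketch (reviewer note, not part of the generated patch):
# constructing the OperationDisplay model introduced in the hunk above.
# Keyword names follow the __init__ signature shown there; the import path
# and the field values are assumptions for illustration only.
from azure.mgmt.media.models import OperationDisplay

display = OperationDisplay(
    provider='Microsoft Media Services',
    resource='Media Services accounts',
    operation='Read',
    description='Reads a Media Services account.')
print('{}/{}: {}'.format(display.provider, display.resource, display.description))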
Possible values include: 'FrontLeft', 'FrontRight', - 'Center', 'LowFrequencyEffects', 'BackLeft', 'BackRight', 'StereoLeft', - 'StereoRight' - :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping - :param attribute: Required. The TrackAttribute to filter the tracks by. - Possible values include: 'Bitrate', 'Language' - :type attribute: str or ~azure.mgmt.media.models.TrackAttribute - :param filter: Required. The type of AttributeFilter to apply to the - TrackAttribute in order to select the tracks. Possible values include: - 'All', 'Top', 'Bottom', 'ValueEquals' - :type filter: str or ~azure.mgmt.media.models.AttributeFilter - :param filter_value: The value to filter the tracks by. Only used when - AttributeFilter.ValueEquals is specified for the Filter property. - :type filter_value: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'attribute': {'required': True}, - 'filter': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, - 'attribute': {'key': 'attribute', 'type': 'str'}, - 'filter': {'key': 'filter', 'type': 'str'}, - 'filter_value': {'key': 'filterValue', 'type': 'str'}, - } - - def __init__(self, *, attribute, filter, channel_mapping=None, filter_value: str=None, **kwargs) -> None: - super(SelectAudioTrackByAttribute, self).__init__(channel_mapping=channel_mapping, **kwargs) - self.attribute = attribute - self.filter = filter - self.filter_value = filter_value - self.odatatype = '#Microsoft.Media.SelectAudioTrackByAttribute' - - -class SelectAudioTrackById(AudioTrackDescriptor): - """Select audio tracks from the input by specifying a track identifier. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param channel_mapping: Optional designation for single channel audio - tracks. Can be used to combine the tracks into stereo or multi-channel - audio tracks. Possible values include: 'FrontLeft', 'FrontRight', - 'Center', 'LowFrequencyEffects', 'BackLeft', 'BackRight', 'StereoLeft', - 'StereoRight' - :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping - :param track_id: Required. Track identifier to select - :type track_id: long - """ - - _validation = { - 'odatatype': {'required': True}, - 'track_id': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, - 'track_id': {'key': 'trackId', 'type': 'long'}, - } - - def __init__(self, *, track_id: int, channel_mapping=None, **kwargs) -> None: - super(SelectAudioTrackById, self).__init__(channel_mapping=channel_mapping, **kwargs) - self.track_id = track_id - self.odatatype = '#Microsoft.Media.SelectAudioTrackById' - - -class VideoTrackDescriptor(TrackDescriptor): - """A TrackSelection to select video tracks. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: SelectVideoTrackByAttribute, SelectVideoTrackById - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. 
- :type odatatype: str - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - } - - _subtype_map = { - 'odatatype': {'#Microsoft.Media.SelectVideoTrackByAttribute': 'SelectVideoTrackByAttribute', '#Microsoft.Media.SelectVideoTrackById': 'SelectVideoTrackById'} - } - - def __init__(self, **kwargs) -> None: - super(VideoTrackDescriptor, self).__init__(**kwargs) - self.odatatype = '#Microsoft.Media.VideoTrackDescriptor' - - -class SelectVideoTrackByAttribute(VideoTrackDescriptor): - """Select video tracks from the input by specifying an attribute and an - attribute filter. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param attribute: Required. The TrackAttribute to filter the tracks by. - Possible values include: 'Bitrate', 'Language' - :type attribute: str or ~azure.mgmt.media.models.TrackAttribute - :param filter: Required. The type of AttributeFilter to apply to the - TrackAttribute in order to select the tracks. Possible values include: - 'All', 'Top', 'Bottom', 'ValueEquals' - :type filter: str or ~azure.mgmt.media.models.AttributeFilter - :param filter_value: The value to filter the tracks by. Only used when - AttributeFilter.ValueEquals is specified for the Filter property. For - TrackAttribute.Bitrate, this should be an integer value in bits per second - (e.g: '1500000'). The TrackAttribute.Language is not supported for video - tracks. - :type filter_value: str - """ - - _validation = { - 'odatatype': {'required': True}, - 'attribute': {'required': True}, - 'filter': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'attribute': {'key': 'attribute', 'type': 'str'}, - 'filter': {'key': 'filter', 'type': 'str'}, - 'filter_value': {'key': 'filterValue', 'type': 'str'}, - } - - def __init__(self, *, attribute, filter, filter_value: str=None, **kwargs) -> None: - super(SelectVideoTrackByAttribute, self).__init__(**kwargs) - self.attribute = attribute - self.filter = filter - self.filter_value = filter_value - self.odatatype = '#Microsoft.Media.SelectVideoTrackByAttribute' - - -class SelectVideoTrackById(VideoTrackDescriptor): - """Select video tracks from the input by specifying a track identifier. +class ResourceIdentity(Model): + """ResourceIdentity. All required parameters must be populated in order to send to Azure. - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param track_id: Required. Track identifier to select - :type track_id: long + :param user_assigned_identity: The user assigned managed identity's ARM ID + to use when accessing a resource. + :type user_assigned_identity: str + :param use_system_assigned_identity: Required. Indicates whether to use + System Assigned Managed Identity. Mutual exclusive with User Assigned + Managed Identity. 
+ :type use_system_assigned_identity: bool """ _validation = { - 'odatatype': {'required': True}, - 'track_id': {'required': True}, + 'use_system_assigned_identity': {'required': True}, } _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'track_id': {'key': 'trackId', 'type': 'long'}, + 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, + 'use_system_assigned_identity': {'key': 'useSystemAssignedIdentity', 'type': 'bool'}, } - def __init__(self, *, track_id: int, **kwargs) -> None: - super(SelectVideoTrackById, self).__init__(**kwargs) - self.track_id = track_id - self.odatatype = '#Microsoft.Media.SelectVideoTrackById' + def __init__(self, *, use_system_assigned_identity: bool, user_assigned_identity: str=None, **kwargs) -> None: + super(ResourceIdentity, self).__init__(**kwargs) + self.user_assigned_identity = user_assigned_identity + self.use_system_assigned_identity = use_system_assigned_identity class ServiceSpecification(Model): @@ -6234,49 +3488,12 @@ def __init__(self, **kwargs) -> None: self.metric_specifications = None -class StandardEncoderPreset(Preset): - """Describes all the settings to be used when encoding the input video with - the Standard Encoder. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param filters: One or more filtering operations that are applied to the - input media before encoding. - :type filters: ~azure.mgmt.media.models.Filters - :param codecs: Required. The list of codecs to be used when encoding the - input video. - :type codecs: list[~azure.mgmt.media.models.Codec] - :param formats: Required. The list of outputs to be produced by the - encoder. - :type formats: list[~azure.mgmt.media.models.Format] - """ - - _validation = { - 'odatatype': {'required': True}, - 'codecs': {'required': True}, - 'formats': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'filters': {'key': 'filters', 'type': 'Filters'}, - 'codecs': {'key': 'codecs', 'type': '[Codec]'}, - 'formats': {'key': 'formats', 'type': '[Format]'}, - } - - def __init__(self, *, codecs, formats, filters=None, **kwargs) -> None: - super(StandardEncoderPreset, self).__init__(**kwargs) - self.filters = filters - self.codecs = codecs - self.formats = formats - self.odatatype = '#Microsoft.Media.StandardEncoderPreset' - - class StorageAccount(Model): """The storage account details. + Variables are only populated by the server, and will be ignored when + sending a request. + All required parameters must be populated in order to send to Azure. :param id: The ID of the storage account resource. Media Services relies @@ -6288,21 +3505,30 @@ class StorageAccount(Model): :param type: Required. The type of the storage account. Possible values include: 'Primary', 'Secondary' :type type: str or ~azure.mgmt.media.models.StorageAccountType + :param identity: The storage account identity. + :type identity: ~azure.mgmt.media.models.ResourceIdentity + :ivar status: The current status of the storage account mapping. 
+ :vartype status: str """ _validation = { 'type': {'required': True}, + 'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, + 'status': {'key': 'status', 'type': 'str'}, } - def __init__(self, *, type, id: str=None, **kwargs) -> None: + def __init__(self, *, type, id: str=None, identity=None, **kwargs) -> None: super(StorageAccount, self).__init__(**kwargs) self.id = id self.type = type + self.identity = identity + self.status = None class StorageEncryptedAssetDecryptionData(Model): @@ -6972,314 +4198,29 @@ def __init__(self, *, track_selections=None, **kwargs) -> None: self.track_selections = track_selections -class Transform(ProxyResource): - """A Transform encapsulates the rules or instructions for generating desired - outputs from input media, such as by transcoding or by extracting insights. - After the Transform is created, it can be applied to input media by - creating Jobs. +class UserAssignedManagedIdentity(Model): + """UserAssignedManagedIdentity. Variables are only populated by the server, and will be ignored when sending a request. - All required parameters must be populated in order to send to Azure. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - :vartype id: str - :ivar name: The name of the resource - :vartype name: str - :ivar type: The type of the resource. E.g. - "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - :vartype type: str - :ivar created: The UTC date and time when the Transform was created, in - 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype created: datetime - :param description: An optional verbose description of the Transform. - :type description: str - :ivar last_modified: The UTC date and time when the Transform was last - updated, in 'YYYY-MM-DDThh:mm:ssZ' format. - :vartype last_modified: datetime - :param outputs: Required. An array of one or more TransformOutputs that - the Transform should generate. - :type outputs: list[~azure.mgmt.media.models.TransformOutput] - :ivar system_data: The system metadata relating to this resource. - :vartype system_data: ~azure.mgmt.media.models.SystemData - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'created': {'readonly': True}, - 'last_modified': {'readonly': True}, - 'outputs': {'required': True}, - 'system_data': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'created': {'key': 'properties.created', 'type': 'iso-8601'}, - 'description': {'key': 'properties.description', 'type': 'str'}, - 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, - 'outputs': {'key': 'properties.outputs', 'type': '[TransformOutput]'}, - 'system_data': {'key': 'systemData', 'type': 'SystemData'}, - } - - def __init__(self, *, outputs, description: str=None, **kwargs) -> None: - super(Transform, self).__init__(**kwargs) - self.created = None - self.description = description - self.last_modified = None - self.outputs = outputs - self.system_data = None - - -class TransformOutput(Model): - """Describes the properties of a TransformOutput, which are the rules to be - applied while generating the desired output. 
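# Illustrative sketch (reviewer note, not part of the generated patch): the
# 2021-06-01 StorageAccount can now carry an optional ResourceIdentity, and
# its 'status' is read-only (server-populated). Keyword names follow the
# __init__ signatures shown in the hunks above; the ARM IDs below are
# hypothetical placeholders.
from azure.mgmt.media.models import ResourceIdentity, StorageAccount

identity = ResourceIdentity(
    use_system_assigned_identity=False,
    user_assigned_identity=(
        '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
        '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity'))
storage = StorageAccount(
    type='Primary',
    id=('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
        '/providers/Microsoft.Storage/storageAccounts/mystorageaccount'),
    identity=identity)
assert storage.status is None  # populated by the service, ignored on requests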
- - All required parameters must be populated in order to send to Azure. - - :param on_error: A Transform can define more than one outputs. This - property defines what the service should do when one output fails - either - continue to produce other outputs, or, stop the other outputs. The overall - Job state will not reflect failures of outputs that are specified with - 'ContinueJob'. The default is 'StopProcessingJob'. Possible values - include: 'StopProcessingJob', 'ContinueJob' - :type on_error: str or ~azure.mgmt.media.models.OnErrorType - :param relative_priority: Sets the relative priority of the - TransformOutputs within a Transform. This sets the priority that the - service uses for processing TransformOutputs. The default priority is - Normal. Possible values include: 'Low', 'Normal', 'High' - :type relative_priority: str or ~azure.mgmt.media.models.Priority - :param preset: Required. Preset that describes the operations that will be - used to modify, transcode, or extract insights from the source file to - generate the output. - :type preset: ~azure.mgmt.media.models.Preset - """ - - _validation = { - 'preset': {'required': True}, - } - - _attribute_map = { - 'on_error': {'key': 'onError', 'type': 'str'}, - 'relative_priority': {'key': 'relativePriority', 'type': 'str'}, - 'preset': {'key': 'preset', 'type': 'Preset'}, - } - - def __init__(self, *, preset, on_error=None, relative_priority=None, **kwargs) -> None: - super(TransformOutput, self).__init__(**kwargs) - self.on_error = on_error - self.relative_priority = relative_priority - self.preset = preset - - -class TransportStreamFormat(MultiBitrateFormat): - """Describes the properties for generating an MPEG-2 Transport Stream (ISO/IEC - 13818-1) output video file(s). - - All required parameters must be populated in order to send to Azure. - - :param filename_pattern: Required. The pattern of the file names for the - generated output files. The following macros are supported in the file - name: {Basename} - An expansion macro that will use the name of the input - video file. If the base name(the file suffix is not included) of the input - video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file - exceeds 32 characters, the base name is truncated to the first 32 - characters in total length. {Extension} - The appropriate extension for - this format. {Label} - The label assigned to the codec/layer. {Index} - A - unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type - of the audio/video codec. {Resolution} - The video resolution. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param output_files: The list of output files to produce. Each entry in - the list is a set of audio and video layer labels to be muxed together . 
- :type output_files: list[~azure.mgmt.media.models.OutputFile] - """ - - _validation = { - 'filename_pattern': {'required': True}, - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, - } - - def __init__(self, *, filename_pattern: str, output_files=None, **kwargs) -> None: - super(TransportStreamFormat, self).__init__(filename_pattern=filename_pattern, output_files=output_files, **kwargs) - self.odatatype = '#Microsoft.Media.TransportStreamFormat' - - -class UtcClipTime(ClipTime): - """Specifies the clip time as a Utc time position in the media file. The Utc - time can point to a different position depending on whether the media file - starts from a timestamp of zero or not. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param time: Required. The time position on the timeline of the input - media based on Utc time. - :type time: datetime - """ - - _validation = { - 'odatatype': {'required': True}, - 'time': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'time': {'key': 'time', 'type': 'iso-8601'}, - } - - def __init__(self, *, time, **kwargs) -> None: - super(UtcClipTime, self).__init__(**kwargs) - self.time = time - self.odatatype = '#Microsoft.Media.UtcClipTime' - - -class VideoAnalyzerPreset(AudioAnalyzerPreset): - """A video analyzer preset that extracts insights (rich metadata) from both - audio and video, and outputs a JSON format file. - - All required parameters must be populated in order to send to Azure. - - :param odatatype: Required. Constant filled by server. - :type odatatype: str - :param audio_language: The language for the audio payload in the input - using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you - know the language of your content, it is recommended that you specify it. - The language must be specified explicitly for AudioAnalysisMode::Basic, - since automatic language detection is not included in basic mode. If the - language isn't specified or set to null, automatic language detection will - choose the first language detected and process with the selected language - for the duration of the file. It does not currently support dynamically - switching between languages after the first language is detected. The - automatic detection works best with audio recordings with clearly - discernable speech. If automatic detection fails to find the language, - transcription would fallback to 'en-US'." The list of supported languages - is available here: https://go.microsoft.com/fwlink/?linkid=2109463 - :type audio_language: str - :param mode: Determines the set of audio analysis operations to be - performed. If unspecified, the Standard AudioAnalysisMode would be chosen. - Possible values include: 'Standard', 'Basic' - :type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode - :param experimental_options: Dictionary containing key value pairs for - parameters not exposed in the preset itself - :type experimental_options: dict[str, str] - :param insights_to_extract: Defines the type of insights that you want the - service to generate. The allowed values are 'AudioInsightsOnly', - 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. 
If you - set this to AllInsights and the input is audio only, then only audio - insights are generated. Similarly if the input is video only, then only - video insights are generated. It is recommended that you not use - AudioInsightsOnly if you expect some of your inputs to be video only; or - use VideoInsightsOnly if you expect some of your inputs to be audio only. - Your Jobs in such conditions would error out. Possible values include: - 'AudioInsightsOnly', 'VideoInsightsOnly', 'AllInsights' - :type insights_to_extract: str or ~azure.mgmt.media.models.InsightsType - """ - - _validation = { - 'odatatype': {'required': True}, - } - - _attribute_map = { - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'audio_language': {'key': 'audioLanguage', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, - 'insights_to_extract': {'key': 'insightsToExtract', 'type': 'str'}, - } - - def __init__(self, *, audio_language: str=None, mode=None, experimental_options=None, insights_to_extract=None, **kwargs) -> None: - super(VideoAnalyzerPreset, self).__init__(audio_language=audio_language, mode=mode, experimental_options=experimental_options, **kwargs) - self.insights_to_extract = insights_to_extract - self.odatatype = '#Microsoft.Media.VideoAnalyzerPreset' - - -class VideoOverlay(Overlay): - """Describes the properties of a video overlay. - - All required parameters must be populated in order to send to Azure. - - :param input_label: Required. The label of the job input which is to be - used as an overlay. The Input must specify exactly one file. You can - specify an image file in JPG, PNG, GIF or BMP format, or an audio file - (such as a WAV, MP3, WMA or M4A file), or a video file. See - https://aka.ms/mesformats for the complete list of supported audio and - video file formats. - :type input_label: str - :param start: The start position, with reference to the input video, at - which the overlay starts. The value should be in ISO 8601 format. For - example, PT05S to start the overlay at 5 seconds into the input video. If - not specified the overlay starts from the beginning of the input video. - :type start: timedelta - :param end: The end position, with reference to the input video, at which - the overlay ends. The value should be in ISO 8601 format. For example, - PT30S to end the overlay at 30 seconds into the input video. If not - specified or the value is greater than the input video duration, the - overlay will be applied until the end of the input video if the overlay - media duration is greater than the input video duration, else the overlay - will last as long as the overlay media duration. - :type end: timedelta - :param fade_in_duration: The duration over which the overlay fades in onto - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade in (same as PT0S). - :type fade_in_duration: timedelta - :param fade_out_duration: The duration over which the overlay fades out of - the input video. The value should be in ISO 8601 duration format. If not - specified the default behavior is to have no fade out (same as PT0S). - :type fade_out_duration: timedelta - :param audio_gain_level: The gain level of audio in the overlay. The value - should be in the range [0, 1.0]. The default is 1.0. - :type audio_gain_level: float - :param odatatype: Required. Constant filled by server. 
- :type odatatype: str - :param position: The location in the input video where the overlay is - applied. - :type position: ~azure.mgmt.media.models.Rectangle - :param opacity: The opacity of the overlay. This is a value in the range - [0 - 1.0]. Default is 1.0 which mean the overlay is opaque. - :type opacity: float - :param crop_rectangle: An optional rectangular window used to crop the - overlay image or video. - :type crop_rectangle: ~azure.mgmt.media.models.Rectangle + :ivar client_id: The client ID. + :vartype client_id: str + :ivar principal_id: The principal ID. + :vartype principal_id: str """ _validation = { - 'input_label': {'required': True}, - 'odatatype': {'required': True}, + 'client_id': {'readonly': True}, + 'principal_id': {'readonly': True}, } _attribute_map = { - 'input_label': {'key': 'inputLabel', 'type': 'str'}, - 'start': {'key': 'start', 'type': 'duration'}, - 'end': {'key': 'end', 'type': 'duration'}, - 'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'}, - 'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'}, - 'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'}, - 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, - 'position': {'key': 'position', 'type': 'Rectangle'}, - 'opacity': {'key': 'opacity', 'type': 'float'}, - 'crop_rectangle': {'key': 'cropRectangle', 'type': 'Rectangle'}, + 'client_id': {'key': 'clientId', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, } - def __init__(self, *, input_label: str, start=None, end=None, fade_in_duration=None, fade_out_duration=None, audio_gain_level: float=None, position=None, opacity: float=None, crop_rectangle=None, **kwargs) -> None: - super(VideoOverlay, self).__init__(input_label=input_label, start=start, end=end, fade_in_duration=fade_in_duration, fade_out_duration=fade_out_duration, audio_gain_level=audio_gain_level, **kwargs) - self.position = position - self.opacity = opacity - self.crop_rectangle = crop_rectangle - self.odatatype = '#Microsoft.Media.VideoOverlay' + def __init__(self, **kwargs) -> None: + super(UserAssignedManagedIdentity, self).__init__(**kwargs) + self.client_id = None + self.principal_id = None diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_paged_models.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_paged_models.py index 5dc8138614d5..f35dd8949c84 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_paged_models.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_paged_models.py @@ -12,45 +12,32 @@ from msrest.paging import Paged -class AccountFilterPaged(Paged): - """ - A paging container for iterating over a list of :class:`AccountFilter ` object - """ - - _attribute_map = { - 'next_link': {'key': '@odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[AccountFilter]'} - } - - def __init__(self, *args, **kwargs): - - super(AccountFilterPaged, self).__init__(*args, **kwargs) -class OperationPaged(Paged): +class MediaServicePaged(Paged): """ - A paging container for iterating over a list of :class:`Operation ` object + A paging container for iterating over a list of :class:`MediaService ` object """ _attribute_map = { 'next_link': {'key': '@odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[Operation]'} + 'current_page': {'key': 'value', 'type': '[MediaService]'} } def __init__(self, *args, **kwargs): - super(OperationPaged, self).__init__(*args, **kwargs) -class MediaServicePaged(Paged): + super(MediaServicePaged, 
self).__init__(*args, **kwargs) +class AccountFilterPaged(Paged): """ - A paging container for iterating over a list of :class:`MediaService ` object + A paging container for iterating over a list of :class:`AccountFilter ` object """ _attribute_map = { 'next_link': {'key': '@odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[MediaService]'} + 'current_page': {'key': 'value', 'type': '[AccountFilter]'} } def __init__(self, *args, **kwargs): - super(MediaServicePaged, self).__init__(*args, **kwargs) + super(AccountFilterPaged, self).__init__(*args, **kwargs) class AssetPaged(Paged): """ A paging container for iterating over a list of :class:`Asset ` object @@ -90,32 +77,6 @@ class ContentKeyPolicyPaged(Paged): def __init__(self, *args, **kwargs): super(ContentKeyPolicyPaged, self).__init__(*args, **kwargs) -class TransformPaged(Paged): - """ - A paging container for iterating over a list of :class:`Transform ` object - """ - - _attribute_map = { - 'next_link': {'key': '@odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[Transform]'} - } - - def __init__(self, *args, **kwargs): - - super(TransformPaged, self).__init__(*args, **kwargs) -class JobPaged(Paged): - """ - A paging container for iterating over a list of :class:`Job ` object - """ - - _attribute_map = { - 'next_link': {'key': '@odata\\.nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[Job]'} - } - - def __init__(self, *args, **kwargs): - - super(JobPaged, self).__init__(*args, **kwargs) class StreamingPolicyPaged(Paged): """ A paging container for iterating over a list of :class:`StreamingPolicy ` object diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/__init__.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/__init__.py index 44e5a835708e..a64d2f3bf1ef 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/__init__.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/__init__.py @@ -9,17 +9,15 @@ # regenerated. 
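# Illustrative sketch (reviewer note, not part of the generated patch): the
# msrest Paged containers reordered above (MediaServicePaged,
# AccountFilterPaged, ...) are what the matching list() operations return,
# and are consumed by plain iteration. The 'client' argument is assumed to
# be an already-constructed Azure Media Services management client, and the
# list() signature below is an assumption based on the operations hunks.
def print_account_filter_names(client, resource_group_name, account_name):
    """Print the name of every AccountFilter returned by the paged list() call."""
    for account_filter in client.account_filters.list(resource_group_name, account_name):
        print(account_filter.name)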
# -------------------------------------------------------------------------- -from ._account_filters_operations import AccountFiltersOperations from ._operations import Operations from ._mediaservices_operations import MediaservicesOperations from ._private_link_resources_operations import PrivateLinkResourcesOperations from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations from ._locations_operations import LocationsOperations +from ._account_filters_operations import AccountFiltersOperations from ._assets_operations import AssetsOperations from ._asset_filters_operations import AssetFiltersOperations from ._content_key_policies_operations import ContentKeyPoliciesOperations -from ._transforms_operations import TransformsOperations -from ._jobs_operations import JobsOperations from ._streaming_policies_operations import StreamingPoliciesOperations from ._streaming_locators_operations import StreamingLocatorsOperations from ._live_events_operations import LiveEventsOperations @@ -27,17 +25,15 @@ from ._streaming_endpoints_operations import StreamingEndpointsOperations __all__ = [ - 'AccountFiltersOperations', 'Operations', 'MediaservicesOperations', 'PrivateLinkResourcesOperations', 'PrivateEndpointConnectionsOperations', 'LocationsOperations', + 'AccountFiltersOperations', 'AssetsOperations', 'AssetFiltersOperations', 'ContentKeyPoliciesOperations', - 'TransformsOperations', - 'JobsOperations', 'StreamingPoliciesOperations', 'StreamingLocatorsOperations', 'LiveEventsOperations', diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py index 83f402f99996..b341a3698048 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py @@ -24,7 +24,7 @@ class AccountFiltersOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". 
""" models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -58,7 +58,7 @@ def list( :rtype: ~azure.mgmt.media.models.AccountFilterPaged[~azure.mgmt.media.models.AccountFilter] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -99,7 +99,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -134,7 +134,7 @@ def get( :rtype: ~azure.mgmt.media.models.AccountFilter or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -164,8 +164,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -202,7 +202,7 @@ def create_or_update( :rtype: ~azure.mgmt.media.models.AccountFilter or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.create_or_update.metadata['url'] @@ -237,7 +237,7 @@ def create_or_update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -273,7 +273,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -303,7 +303,7 @@ def delete( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -334,7 +334,7 @@ def update( :rtype: ~azure.mgmt.media.models.AccountFilter or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.update.metadata['url'] @@ -369,7 +369,7 @@ def update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py index 3d7f9b052af6..f7f2067b1c70 100644 --- 
a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py @@ -24,7 +24,7 @@ class AssetFiltersOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -60,7 +60,7 @@ def list( :rtype: ~azure.mgmt.media.models.AssetFilterPaged[~azure.mgmt.media.models.AssetFilter] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -102,7 +102,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -139,7 +139,7 @@ def get( :rtype: ~azure.mgmt.media.models.AssetFilter or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -170,8 +170,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -210,7 +210,7 @@ def create_or_update( :rtype: ~azure.mgmt.media.models.AssetFilter or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.create_or_update.metadata['url'] @@ -246,7 +246,7 @@ def create_or_update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -284,7 +284,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -315,7 +315,7 @@ def delete( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -348,7 +348,7 @@ def update( :rtype: ~azure.mgmt.media.models.AssetFilter or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.update.metadata['url'] @@ -384,7 +384,7 
@@ def update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py index 567c5df52671..3f7ef04f386c 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py @@ -24,7 +24,7 @@ class AssetsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -68,7 +68,7 @@ def list( :rtype: ~azure.mgmt.media.models.AssetPaged[~azure.mgmt.media.models.Asset] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -115,7 +115,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -150,7 +150,7 @@ def get( :rtype: ~azure.mgmt.media.models.Asset or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -180,8 +180,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -218,7 +218,7 @@ def create_or_update( :rtype: ~azure.mgmt.media.models.Asset or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.create_or_update.metadata['url'] @@ -253,7 +253,7 @@ def create_or_update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -289,7 +289,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -319,7 +319,7 @@ def delete( response = self._client.send(request, stream=False, **operation_config) if 
response.status_code not in [200, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -350,7 +350,7 @@ def update( :rtype: ~azure.mgmt.media.models.Asset or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.update.metadata['url'] @@ -385,7 +385,7 @@ def update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -429,7 +429,7 @@ def list_container_sas( :rtype: ~azure.mgmt.media.models.AssetContainerSas or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ parameters = models.ListContainerSasInput(permissions=permissions, expiry_time=expiry_time) @@ -466,7 +466,7 @@ def list_container_sas( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -503,7 +503,7 @@ def get_encryption_key( :rtype: ~azure.mgmt.media.models.StorageEncryptedAssetDecryptionData or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get_encryption_key.metadata['url'] @@ -534,7 +534,7 @@ def get_encryption_key( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -570,7 +570,7 @@ def list_streaming_locators( :rtype: ~azure.mgmt.media.models.ListStreamingLocatorsResponse or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.list_streaming_locators.metadata['url'] @@ -601,7 +601,7 @@ def list_streaming_locators( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py index 8d842a870dd3..c044e7a0edfb 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py @@ -24,7 +24,7 @@ class ContentKeyPoliciesOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". 
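# Illustrative sketch (reviewer note, not part of the generated patch): with
# api-version 2021-06-01 these operation classes raise ErrorResponseException
# instead of ApiErrorException, and get() no longer treats 404 as a success
# status, so a missing resource now surfaces as an exception rather than a
# None result. 'client' is assumed to be an already-constructed Azure Media
# Services management client; the get() signature is an assumption.
from azure.mgmt.media.models import ErrorResponseException

def get_content_key_policy_or_none(client, resource_group_name, account_name, policy_name):
    """Return the ContentKeyPolicy, or None when the service reports an error (e.g. not found)."""
    try:
        return client.content_key_policies.get(resource_group_name, account_name, policy_name)
    except ErrorResponseException:
        return None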
""" models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -67,7 +67,7 @@ def list( :rtype: ~azure.mgmt.media.models.ContentKeyPolicyPaged[~azure.mgmt.media.models.ContentKeyPolicy] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -114,7 +114,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -149,7 +149,7 @@ def get( :rtype: ~azure.mgmt.media.models.ContentKeyPolicy or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -179,8 +179,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -219,7 +219,7 @@ def create_or_update( :rtype: ~azure.mgmt.media.models.ContentKeyPolicy or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ parameters = models.ContentKeyPolicy(description=description, options=options) @@ -256,7 +256,7 @@ def create_or_update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -292,7 +292,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -322,7 +322,7 @@ def delete( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -355,7 +355,7 @@ def update( :rtype: ~azure.mgmt.media.models.ContentKeyPolicy or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ parameters = models.ContentKeyPolicy(description=description, options=options) @@ -392,7 +392,7 @@ def update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -427,7 +427,7 @@ def get_policy_properties_with_secrets( :rtype: ~azure.mgmt.media.models.ContentKeyPolicyProperties or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + 
:class:`ErrorResponseException` """ # Construct URL url = self.get_policy_properties_with_secrets.metadata['url'] @@ -457,8 +457,8 @@ def get_policy_properties_with_secrets( request = self._client.post(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py deleted file mode 100644 index d83a92f00c1e..000000000000 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py +++ /dev/null @@ -1,468 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse - -from .. import models - - -class JobsOperations(object): - """JobsOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2020-05-01" - - self.config = config - - def list( - self, resource_group_name, account_name, transform_name, filter=None, orderby=None, custom_headers=None, raw=False, **operation_config): - """List Jobs. - - Lists all of the Jobs for the Transform. - - :param resource_group_name: The name of the resource group within the - Azure subscription. - :type resource_group_name: str - :param account_name: The Media Services account name. - :type account_name: str - :param transform_name: The Transform name. - :type transform_name: str - :param filter: Restricts the set of items returned. - :type filter: str - :param orderby: Specifies the key by which the result collection - should be ordered. - :type orderby: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: An iterator like instance of Job - :rtype: - ~azure.mgmt.media.models.JobPaged[~azure.mgmt.media.models.Job] - :raises: - :class:`ApiErrorException` - """ - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - 'transformName': self._serialize.url("transform_name", transform_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if filter is not None: - query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') - if orderby is not None: - query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.JobPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs'} - - def get( - self, resource_group_name, account_name, transform_name, job_name, custom_headers=None, raw=False, **operation_config): - """Get Job. - - Gets a Job. - - :param resource_group_name: The name of the resource group within the - Azure subscription. - :type resource_group_name: str - :param account_name: The Media Services account name. - :type account_name: str - :param transform_name: The Transform name. - :type transform_name: str - :param job_name: The Job name. - :type job_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: Job or ClientRawResponse if raw=true - :rtype: ~azure.mgmt.media.models.Job or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ApiErrorException` - """ - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - 'transformName': self._serialize.url("transform_name", transform_name, 'str'), - 'jobName': self._serialize.url("job_name", job_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('Job', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}'} - - def create( - self, resource_group_name, account_name, transform_name, job_name, parameters, custom_headers=None, raw=False, **operation_config): - """Create Job. - - Creates a Job. - - :param resource_group_name: The name of the resource group within the - Azure subscription. - :type resource_group_name: str - :param account_name: The Media Services account name. - :type account_name: str - :param transform_name: The Transform name. - :type transform_name: str - :param job_name: The Job name. - :type job_name: str - :param parameters: The request parameters - :type parameters: ~azure.mgmt.media.models.Job - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: Job or ClientRawResponse if raw=true - :rtype: ~azure.mgmt.media.models.Job or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ApiErrorException` - """ - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - 'transformName': self._serialize.url("transform_name", transform_name, 'str'), - 'jobName': self._serialize.url("job_name", job_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct body - body_content = self._serialize.body(parameters, 'Job') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [201]: - raise models.ApiErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 201: - deserialized = self._deserialize('Job', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}'} - - def delete( - self, resource_group_name, account_name, transform_name, job_name, custom_headers=None, raw=False, **operation_config): - """Delete Job. - - Deletes a Job. - - :param resource_group_name: The name of the resource group within the - Azure subscription. - :type resource_group_name: str - :param account_name: The Media Services account name. - :type account_name: str - :param transform_name: The Transform name. - :type transform_name: str - :param job_name: The Job name. - :type job_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ApiErrorException` - """ - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - 'transformName': self._serialize.url("transform_name", transform_name, 'str'), - 'jobName': self._serialize.url("job_name", job_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 204]: - raise models.ApiErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}'} - - def update( - self, resource_group_name, account_name, transform_name, job_name, parameters, custom_headers=None, raw=False, **operation_config): - """Update Job. - - Update is only supported for description and priority. Updating - Priority will take effect when the Job state is Queued or Scheduled and - depending on the timing the priority update may be ignored. - - :param resource_group_name: The name of the resource group within the - Azure subscription. - :type resource_group_name: str - :param account_name: The Media Services account name. - :type account_name: str - :param transform_name: The Transform name. - :type transform_name: str - :param job_name: The Job name. - :type job_name: str - :param parameters: The request parameters - :type parameters: ~azure.mgmt.media.models.Job - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: Job or ClientRawResponse if raw=true - :rtype: ~azure.mgmt.media.models.Job or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ApiErrorException` - """ - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - 'transformName': self._serialize.url("transform_name", transform_name, 'str'), - 'jobName': self._serialize.url("job_name", job_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct body - body_content = self._serialize.body(parameters, 'Job') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('Job', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}'} - - def cancel_job( - self, resource_group_name, account_name, transform_name, job_name, custom_headers=None, raw=False, **operation_config): - """Cancel Job. - - Cancel a Job. - - :param resource_group_name: The name of the resource group within the - Azure subscription. - :type resource_group_name: str - :param account_name: The Media Services account name. - :type account_name: str - :param transform_name: The Transform name. - :type transform_name: str - :param job_name: The Job name. - :type job_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ApiErrorException` - """ - # Construct URL - url = self.cancel_job.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - 'transformName': self._serialize.url("transform_name", transform_name, 'str'), - 'jobName': self._serialize.url("job_name", job_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - cancel_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}/cancelJob'} diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py index 52939d530026..77b2a3ece796 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py @@ -26,7 +26,7 @@ class LiveEventsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". 
""" models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -60,7 +60,7 @@ def list( :rtype: ~azure.mgmt.media.models.LiveEventPaged[~azure.mgmt.media.models.LiveEvent] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -101,7 +101,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -137,7 +137,7 @@ def get( :rtype: ~azure.mgmt.media.models.LiveEvent or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -167,8 +167,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -219,7 +219,7 @@ def _create_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None @@ -265,7 +265,7 @@ def create( or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.media.models.LiveEvent]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._create_initial( resource_group_name=resource_group_name, @@ -332,7 +332,7 @@ def _update_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None @@ -373,7 +373,7 @@ def update( or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.media.models.LiveEvent]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._update_initial( resource_group_name=resource_group_name, @@ -434,7 +434,7 @@ def _delete_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -464,7 +464,7 @@ def delete( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._delete_initial( resource_group_name=resource_group_name, @@ -520,7 +520,7 @@ def _allocate_initial( response = self._client.send(request, stream=False, **operation_config) if 
response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -551,7 +551,7 @@ def allocate( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._allocate_initial( resource_group_name=resource_group_name, @@ -607,7 +607,7 @@ def _start_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -638,7 +638,7 @@ def start( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._start_initial( resource_group_name=resource_group_name, @@ -700,7 +700,7 @@ def _stop_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -734,7 +734,7 @@ def stop( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._stop_initial( resource_group_name=resource_group_name, @@ -791,7 +791,7 @@ def _reset_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -824,7 +824,7 @@ def reset( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._reset_initial( resource_group_name=resource_group_name, diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py index ab3db379063e..14fcbe512d76 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py @@ -26,7 +26,7 @@ class LiveOutputsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". 
""" models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -63,7 +63,7 @@ def list( :rtype: ~azure.mgmt.media.models.LiveOutputPaged[~azure.mgmt.media.models.LiveOutput] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -105,7 +105,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -143,7 +143,7 @@ def get( :rtype: ~azure.mgmt.media.models.LiveOutput or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -174,8 +174,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -225,7 +225,7 @@ def _create_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None @@ -270,7 +270,7 @@ def create( or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.media.models.LiveOutput]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._create_initial( resource_group_name=resource_group_name, @@ -333,7 +333,7 @@ def _delete_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -366,7 +366,7 @@ def delete( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._delete_initial( resource_group_name=resource_group_name, diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py index 9c57d5f8d823..e3ba741e2abb 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py @@ -24,7 +24,7 @@ class LocationsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-05-01". 
+ :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2021-05-01" + self.api_version = "2021-06-01" self.config = config @@ -61,7 +61,7 @@ def check_name_availability( :rtype: ~azure.mgmt.media.models.EntityNameAvailabilityCheckOutput or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ parameters = models.CheckNameAvailabilityInput(name=name, type=type) @@ -96,7 +96,7 @@ def check_name_availability( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py index e81181498cf8..9e6891518319 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py @@ -24,7 +24,7 @@ class MediaservicesOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". 
""" models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2021-05-01" + self.api_version = "2021-06-01" self.config = config @@ -56,7 +56,7 @@ def list( :rtype: ~azure.mgmt.media.models.MediaServicePaged[~azure.mgmt.media.models.MediaService] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -96,7 +96,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -129,7 +129,7 @@ def get( :rtype: ~azure.mgmt.media.models.MediaService or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -159,7 +159,7 @@ def get( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -194,7 +194,7 @@ def create_or_update( :rtype: ~azure.mgmt.media.models.MediaService or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.create_or_update.metadata['url'] @@ -228,7 +228,7 @@ def create_or_update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -262,7 +262,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -291,7 +291,7 @@ def delete( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -320,7 +320,7 @@ def update( :rtype: ~azure.mgmt.media.models.MediaService or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.update.metadata['url'] @@ -354,7 +354,7 @@ def update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -389,7 +389,7 @@ def sync_storage_keys( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ parameters = models.SyncStorageKeysInput(id=id) @@ -424,7 +424,7 @@ def sync_storage_keys( response = self._client.send(request, stream=False, **operation_config) if response.status_code 
not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -455,7 +455,7 @@ def list_edge_policies( :rtype: ~azure.mgmt.media.models.EdgePolicies or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ parameters = models.ListEdgePoliciesInput(device_id=device_id) @@ -491,7 +491,7 @@ def list_edge_policies( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -519,7 +519,7 @@ def list_by_subscription( :rtype: ~azure.mgmt.media.models.MediaServicePaged[~azure.mgmt.media.models.MediaService] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -558,7 +558,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py index f240bca04db6..e646126a6142 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py @@ -24,7 +24,7 @@ class Operations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2021-05-01" + self.api_version = "2021-06-01" self.config = config @@ -49,54 +49,43 @@ def list( deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: An iterator like instance of Operation - :rtype: - ~azure.mgmt.media.models.OperationPaged[~azure.mgmt.media.models.Operation] + :return: OperationCollection or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.media.models.OperationCollection or + ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] + # Construct URL + url = self.list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('OperationCollection', response) - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None if raw: - header_dict = {} - deserialized = models.OperationPaged(internal_paging, self._deserialize.dependencies, header_dict) + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response return deserialized list.metadata = {'url': '/providers/Microsoft.Media/operations'} diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py index 3bdd096a4da1..8fa58e782a4c 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py @@ -24,7 +24,7 @@ class PrivateEndpointConnectionsOperations(object): :param config: Configuration of service client. 
:param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2021-05-01" + self.api_version = "2021-06-01" self.config = config @@ -59,7 +59,7 @@ def list( :rtype: ~azure.mgmt.media.models.PrivateEndpointConnectionListResult or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.list.metadata['url'] @@ -89,7 +89,7 @@ def list( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -124,7 +124,7 @@ def get( :rtype: ~azure.mgmt.media.models.PrivateEndpointConnection or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -155,7 +155,7 @@ def get( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -192,7 +192,7 @@ def create_or_update( :rtype: ~azure.mgmt.media.models.PrivateEndpointConnection or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.create_or_update.metadata['url'] @@ -227,7 +227,7 @@ def create_or_update( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -261,7 +261,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -290,8 +290,8 @@ def delete( request = self._client.delete(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200, 204]: + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py index 71808c5c481e..0a7cea4b848f 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py @@ -24,7 +24,7 @@ class 
PrivateLinkResourcesOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2021-05-01" + self.api_version = "2021-06-01" self.config = config @@ -59,7 +59,7 @@ def list( :rtype: ~azure.mgmt.media.models.PrivateLinkResourceListResult or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.list.metadata['url'] @@ -89,7 +89,7 @@ def list( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -124,7 +124,7 @@ def get( :rtype: ~azure.mgmt.media.models.PrivateLinkResource or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -155,7 +155,7 @@ def get( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py index a8c0165c4c3a..d4ef3ad4383f 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py @@ -26,7 +26,7 @@ class StreamingEndpointsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". 
""" models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -60,7 +60,7 @@ def list( :rtype: ~azure.mgmt.media.models.StreamingEndpointPaged[~azure.mgmt.media.models.StreamingEndpoint] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -101,7 +101,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -137,7 +137,7 @@ def get( :rtype: ~azure.mgmt.media.models.StreamingEndpoint or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -167,8 +167,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -219,7 +219,7 @@ def _create_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None @@ -265,7 +265,7 @@ def create( or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.media.models.StreamingEndpoint]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._create_initial( resource_group_name=resource_group_name, @@ -332,7 +332,7 @@ def _update_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None @@ -375,7 +375,7 @@ def update( or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.media.models.StreamingEndpoint]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._update_initial( resource_group_name=resource_group_name, @@ -436,7 +436,7 @@ def _delete_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -466,7 +466,7 @@ def delete( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._delete_initial( resource_group_name=resource_group_name, @@ -522,7 +522,7 @@ def _start_initial( response = self._client.send(request, stream=False, 
**operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -552,7 +552,7 @@ def start( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._start_initial( resource_group_name=resource_group_name, @@ -608,7 +608,7 @@ def _stop_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -638,7 +638,7 @@ def stop( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._stop_initial( resource_group_name=resource_group_name, @@ -700,7 +700,7 @@ def _scale_initial( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -732,7 +732,7 @@ def scale( :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ raw_result = self._scale_initial( resource_group_name=resource_group_name, diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py index f90b5cbf4161..028b74e43e56 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py @@ -24,7 +24,7 @@ class StreamingLocatorsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". 
""" models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -67,7 +67,7 @@ def list( :rtype: ~azure.mgmt.media.models.StreamingLocatorPaged[~azure.mgmt.media.models.StreamingLocator] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -114,7 +114,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -149,7 +149,7 @@ def get( :rtype: ~azure.mgmt.media.models.StreamingLocator or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -179,8 +179,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -217,7 +217,7 @@ def create( :rtype: ~azure.mgmt.media.models.StreamingLocator or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.create.metadata['url'] @@ -252,7 +252,7 @@ def create( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 201: @@ -286,7 +286,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -316,7 +316,7 @@ def delete( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 204]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) @@ -345,7 +345,7 @@ def list_content_keys( :rtype: ~azure.mgmt.media.models.ListContentKeysResponse or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.list_content_keys.metadata['url'] @@ -376,7 +376,7 @@ def list_content_keys( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -411,7 +411,7 @@ def list_paths( :rtype: ~azure.mgmt.media.models.ListPathsResponse or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.list_paths.metadata['url'] @@ 
-442,7 +442,7 @@ def list_paths( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py index 90272113dc28..d3bd2bfc5b93 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py @@ -24,7 +24,7 @@ class StreamingPoliciesOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01". + :ivar api_version: The version of the API to be used with the client request. Constant value: "2021-06-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2020-05-01" + self.api_version = "2021-06-01" self.config = config @@ -67,7 +67,7 @@ def list( :rtype: ~azure.mgmt.media.models.StreamingPolicyPaged[~azure.mgmt.media.models.StreamingPolicy] :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ def prepare_request(next_link=None): if not next_link: @@ -114,7 +114,7 @@ def internal_paging(next_link=None): response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) return response @@ -149,7 +149,7 @@ def get( :rtype: ~azure.mgmt.media.models.StreamingPolicy or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.get.metadata['url'] @@ -179,8 +179,8 @@ def get( request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 404]: - raise models.ApiErrorException(self._deserialize, response) + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: @@ -217,7 +217,7 @@ def create( :rtype: ~azure.mgmt.media.models.StreamingPolicy or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.create.metadata['url'] @@ -252,7 +252,7 @@ def create( response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [201]: - raise models.ApiErrorException(self._deserialize, response) + raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 201: @@ -286,7 +286,7 @@ def delete( :return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ApiErrorException` + :class:`ErrorResponseException` """ # Construct URL url = self.delete.metadata['url'] @@ -316,7 +316,7 @@ def delete( response 
diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py
deleted file mode 100644
index e2bad8cb194f..000000000000
--- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-import uuid
-from msrest.pipeline import ClientRawResponse
-
-from .. import models
-
-
-class TransformsOperations(object):
-    """TransformsOperations operations.
-
-    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
-
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    :ivar api_version: The version of the API to be used with the client request. Constant value: "2020-05-01".
-    """
-
-    models = models
-
-    def __init__(self, client, config, serializer, deserializer):
-
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-        self.api_version = "2020-05-01"
-
-        self.config = config
-
-    def list(
-            self, resource_group_name, account_name, filter=None, orderby=None, custom_headers=None, raw=False, **operation_config):
-        """List Transforms.
-
-        Lists the Transforms in the account.
-
-        :param resource_group_name: The name of the resource group within the
-         Azure subscription.
-        :type resource_group_name: str
-        :param account_name: The Media Services account name.
-        :type account_name: str
-        :param filter: Restricts the set of items returned.
-        :type filter: str
-        :param orderby: Specifies the key by which the result collection
-         should be ordered.
-        :type orderby: str
-        :param dict custom_headers: headers that will be added to the request
-        :param bool raw: returns the direct response alongside the
-         deserialized response
-        :param operation_config: :ref:`Operation configuration
-         overrides`.
-        :return: An iterator like instance of Transform
-        :rtype:
-         ~azure.mgmt.media.models.TransformPaged[~azure.mgmt.media.models.Transform]
-        :raises:
-         :class:`ApiErrorException`
-        """
-        def prepare_request(next_link=None):
-            if not next_link:
-                # Construct URL
-                url = self.list.metadata['url']
-                path_format_arguments = {
-                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
-                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
-                    'accountName': self._serialize.url("account_name", account_name, 'str')
-                }
-                url = self._client.format_url(url, **path_format_arguments)
-
-                # Construct parameters
-                query_parameters = {}
-                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
-                if filter is not None:
-                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
-                if orderby is not None:
-                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
-
-            else:
-                url = next_link
-                query_parameters = {}
-
-            # Construct headers
-            header_parameters = {}
-            header_parameters['Accept'] = 'application/json'
-            if self.config.generate_client_request_id:
-                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
-            if custom_headers:
-                header_parameters.update(custom_headers)
-            if self.config.accept_language is not None:
-                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
-
-            # Construct and send request
-            request = self._client.get(url, query_parameters, header_parameters)
-            return request
-
-        def internal_paging(next_link=None):
-            request = prepare_request(next_link)
-
-            response = self._client.send(request, stream=False, **operation_config)
-
-            if response.status_code not in [200]:
-                raise models.ApiErrorException(self._deserialize, response)
-
-            return response
-
-        # Deserialize response
-        header_dict = None
-        if raw:
-            header_dict = {}
-        deserialized = models.TransformPaged(internal_paging, self._deserialize.dependencies, header_dict)
-
-        return deserialized
-    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms'}
-
-    def get(
-            self, resource_group_name, account_name, transform_name, custom_headers=None, raw=False, **operation_config):
-        """Get Transform.
-
-        Gets a Transform.
-
-        :param resource_group_name: The name of the resource group within the
-         Azure subscription.
-        :type resource_group_name: str
-        :param account_name: The Media Services account name.
-        :type account_name: str
-        :param transform_name: The Transform name.
-        :type transform_name: str
-        :param dict custom_headers: headers that will be added to the request
-        :param bool raw: returns the direct response alongside the
-         deserialized response
-        :param operation_config: :ref:`Operation configuration
-         overrides`.
-        :return: Transform or ClientRawResponse if raw=true
-        :rtype: ~azure.mgmt.media.models.Transform or
-         ~msrest.pipeline.ClientRawResponse
-        :raises:
-         :class:`ApiErrorException`
-        """
-        # Construct URL
-        url = self.get.metadata['url']
-        path_format_arguments = {
-            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
-            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
-            'accountName': self._serialize.url("account_name", account_name, 'str'),
-            'transformName': self._serialize.url("transform_name", transform_name, 'str')
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        if self.config.generate_client_request_id:
-            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
-        if custom_headers:
-            header_parameters.update(custom_headers)
-        if self.config.accept_language is not None:
-            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
-
-        # Construct and send request
-        request = self._client.get(url, query_parameters, header_parameters)
-        response = self._client.send(request, stream=False, **operation_config)
-
-        if response.status_code not in [200, 404]:
-            raise models.ApiErrorException(self._deserialize, response)
-
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('Transform', response)
-
-        if raw:
-            client_raw_response = ClientRawResponse(deserialized, response)
-            return client_raw_response
-
-        return deserialized
-    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'}
-
-    def create_or_update(
-            self, resource_group_name, account_name, transform_name, outputs, description=None, custom_headers=None, raw=False, **operation_config):
-        """Create or Update Transform.
-
-        Creates or updates a new Transform.
-
-        :param resource_group_name: The name of the resource group within the
-         Azure subscription.
-        :type resource_group_name: str
-        :param account_name: The Media Services account name.
-        :type account_name: str
-        :param transform_name: The Transform name.
-        :type transform_name: str
-        :param outputs: An array of one or more TransformOutputs that the
-         Transform should generate.
-        :type outputs: list[~azure.mgmt.media.models.TransformOutput]
-        :param description: An optional verbose description of the Transform.
-        :type description: str
-        :param dict custom_headers: headers that will be added to the request
-        :param bool raw: returns the direct response alongside the
-         deserialized response
-        :param operation_config: :ref:`Operation configuration
-         overrides`.
-        :return: Transform or ClientRawResponse if raw=true
-        :rtype: ~azure.mgmt.media.models.Transform or
-         ~msrest.pipeline.ClientRawResponse
-        :raises:
-         :class:`ApiErrorException`
-        """
-        parameters = models.Transform(description=description, outputs=outputs)
-
-        # Construct URL
-        url = self.create_or_update.metadata['url']
-        path_format_arguments = {
-            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
-            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
-            'accountName': self._serialize.url("account_name", account_name, 'str'),
-            'transformName': self._serialize.url("transform_name", transform_name, 'str')
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-        if self.config.generate_client_request_id:
-            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
-        if custom_headers:
-            header_parameters.update(custom_headers)
-        if self.config.accept_language is not None:
-            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(parameters, 'Transform')
-
-        # Construct and send request
-        request = self._client.put(url, query_parameters, header_parameters, body_content)
-        response = self._client.send(request, stream=False, **operation_config)
-
-        if response.status_code not in [200, 201]:
-            raise models.ApiErrorException(self._deserialize, response)
-
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('Transform', response)
-        if response.status_code == 201:
-            deserialized = self._deserialize('Transform', response)
-
-        if raw:
-            client_raw_response = ClientRawResponse(deserialized, response)
-            return client_raw_response
-
-        return deserialized
-    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'}
-
-    def delete(
-            self, resource_group_name, account_name, transform_name, custom_headers=None, raw=False, **operation_config):
-        """Delete Transform.
-
-        Deletes a Transform.
-
-        :param resource_group_name: The name of the resource group within the
-         Azure subscription.
-        :type resource_group_name: str
-        :param account_name: The Media Services account name.
-        :type account_name: str
-        :param transform_name: The Transform name.
-        :type transform_name: str
-        :param dict custom_headers: headers that will be added to the request
-        :param bool raw: returns the direct response alongside the
-         deserialized response
-        :param operation_config: :ref:`Operation configuration
-         overrides`.
-        :return: None or ClientRawResponse if raw=true
-        :rtype: None or ~msrest.pipeline.ClientRawResponse
-        :raises:
-         :class:`ApiErrorException`
-        """
-        # Construct URL
-        url = self.delete.metadata['url']
-        path_format_arguments = {
-            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
-            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
-            'accountName': self._serialize.url("account_name", account_name, 'str'),
-            'transformName': self._serialize.url("transform_name", transform_name, 'str')
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        if self.config.generate_client_request_id:
-            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
-        if custom_headers:
-            header_parameters.update(custom_headers)
-        if self.config.accept_language is not None:
-            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
-
-        # Construct and send request
-        request = self._client.delete(url, query_parameters, header_parameters)
-        response = self._client.send(request, stream=False, **operation_config)
-
-        if response.status_code not in [200, 204]:
-            raise models.ApiErrorException(self._deserialize, response)
-
-        if raw:
-            client_raw_response = ClientRawResponse(None, response)
-            return client_raw_response
-    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'}
-
-    def update(
-            self, resource_group_name, account_name, transform_name, outputs, description=None, custom_headers=None, raw=False, **operation_config):
-        """Update Transform.
-
-        Updates a Transform.
-
-        :param resource_group_name: The name of the resource group within the
-         Azure subscription.
-        :type resource_group_name: str
-        :param account_name: The Media Services account name.
-        :type account_name: str
-        :param transform_name: The Transform name.
-        :type transform_name: str
-        :param outputs: An array of one or more TransformOutputs that the
-         Transform should generate.
-        :type outputs: list[~azure.mgmt.media.models.TransformOutput]
-        :param description: An optional verbose description of the Transform.
-        :type description: str
-        :param dict custom_headers: headers that will be added to the request
-        :param bool raw: returns the direct response alongside the
-         deserialized response
-        :param operation_config: :ref:`Operation configuration
-         overrides`.
-        :return: Transform or ClientRawResponse if raw=true
-        :rtype: ~azure.mgmt.media.models.Transform or
-         ~msrest.pipeline.ClientRawResponse
-        :raises:
-         :class:`ApiErrorException`
-        """
-        parameters = models.Transform(description=description, outputs=outputs)
-
-        # Construct URL
-        url = self.update.metadata['url']
-        path_format_arguments = {
-            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
-            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
-            'accountName': self._serialize.url("account_name", account_name, 'str'),
-            'transformName': self._serialize.url("transform_name", transform_name, 'str')
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}
-        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
-
-        # Construct headers
-        header_parameters = {}
-        header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-        if self.config.generate_client_request_id:
-            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
-        if custom_headers:
-            header_parameters.update(custom_headers)
-        if self.config.accept_language is not None:
-            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
-
-        # Construct body
-        body_content = self._serialize.body(parameters, 'Transform')
-
-        # Construct and send request
-        request = self._client.patch(url, query_parameters, header_parameters, body_content)
-        response = self._client.send(request, stream=False, **operation_config)
-
-        if response.status_code not in [200]:
-            raise models.ApiErrorException(self._deserialize, response)
-
-        deserialized = None
-        if response.status_code == 200:
-            deserialized = self._deserialize('Transform', response)
-
-        if raw:
-            client_raw_response = ClientRawResponse(deserialized, response)
-            return client_raw_response
-
-        return deserialized
-    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'}
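The deletion above removes the entire 2020-05-01 Transforms operation group from this part of the regeneration: list, get, create_or_update, delete and update, all keyed by resource group, account and transform name, with create_or_update and update additionally taking an outputs list and an optional description. For orientation, here is a hedged sketch of the call pattern the removed code supported; it assumes an already-constructed client that exposed the group as client.transforms (attribute name assumed, not shown in this patch) and leaves the construction of TransformOutput instances abstract, since that is outside what the deleted file defines. Code written against this pattern will not work with the regenerated package and must stay on an SDK version that still ships these operations.

    from azure.mgmt.media import models

    # One or more models.TransformOutput instances would go here; their
    # construction is not part of the deleted file, so it is left abstract.
    transform_outputs = []

    transform = client.transforms.create_or_update(
        resource_group_name="contoso-rg",            # hypothetical values
        account_name="contosomedia",
        transform_name="exampleTransform",
        outputs=transform_outputs,
        description="Example transform",
    )

    # Enumerate the account's Transforms via the paged iterator the removed
    # list() operation returned.
    for item in client.transforms.list("contoso-rg", "contosomedia"):
        print(item.name)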