diff --git a/_vendor/gapic-google-cloud-vision-v1/LICENSE b/_vendor/gapic-google-cloud-vision-v1/LICENSE new file mode 100644 index 000000000000..724a8807144b --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/_vendor/gapic-google-cloud-vision-v1/MANIFEST.in b/_vendor/gapic-google-cloud-vision-v1/MANIFEST.in new file mode 100644 index 000000000000..5fffff88c833 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/MANIFEST.in @@ -0,0 +1,6 @@ +include README.rst LICENSE +global-include *.json +graft google +global-exclude *.py[co] +global-exclude __pycache__ +prune .tox diff --git a/_vendor/gapic-google-cloud-vision-v1/PUBLISHING.rst b/_vendor/gapic-google-cloud-vision-v1/PUBLISHING.rst new file mode 100644 index 000000000000..a6d81225248b --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/PUBLISHING.rst @@ -0,0 +1,46 @@ +PUBLISHING +---------- + +Note: This folder has been generated by the GAPIC code generator. 
+ +The instructions assumes that no changes have been made to folder and its +contents since it was created. + +PREREQUISITES +------------- + +- Python must installed +- [tox](https://testrun.org/tox/latest/) must be installed + + +TO PUBLISH +---------- + +- Make sure you have `an account`_ on pypi_. +- Publish your package using tox. +- *tox must be used here or the uploaded package will be invalid!!* + + :: + + tox -e upload-package + + +TO PUBLISH THE DOCS +------------------- + +- Create the docs + + :: + + tox -e docs + +- Publish them to pythonhosted.org + + :: + + tox -e upload-docs + + +_`Packaging and Distributing projects`: https://packaging.python.org/en/latest/distributing.html#uploading-your-project-to-pypi +_`an account`: https://pypi.python.org/pypi?%3Aaction=register_form +_pypi: http://pypi.python.org diff --git a/_vendor/gapic-google-cloud-vision-v1/README.rst b/_vendor/gapic-google-cloud-vision-v1/README.rst new file mode 100644 index 000000000000..345e4552d989 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/README.rst @@ -0,0 +1,78 @@ +GAPIC library for the Google Cloud Vision API +================================================================================ + +gapic-google-cloud-vision-v1 uses google-gax_ (Google API extensions) to provide an +easy-to-use client library for the `Google Cloud Vision API`_ (v1) defined in the googleapis_ git repository + + +.. _`googleapis`: https://github.com/googleapis/googleapis/tree/master/google/cloud/vision/v1 +.. _`google-gax`: https://github.com/googleapis/gax-python +.. _`Google Cloud Vision API API`: https://developers.google.com/apis-explorer/?hl=en_US#p/vision/v1 + +Getting started +--------------- + +gapic-google-cloud-vision-v1 will allow you to connect to the +Google Cloud Vision API and access all its methods. In order to do this, you need +to set up authentication as well as install the library locally. 
+ + +Setup Authentication +~~~~~~~~~~~~~~~~~~~~ + +To authenticate all your API calls, first install and setup the `Google Cloud SDK`_. +Once done, you can then run the following command in your terminal: + +.. code-block:: console + + $ gcloud beta auth application-default login + +or + +.. code-block:: console + + $ gcloud auth login + +Please see `gcloud beta auth application-default login`_ document for the difference between these commands. + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ +.. _gcloud beta auth application-default login: https://cloud.google.com/sdk/gcloud/reference/beta/auth/application-default/login + + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +~~~~~~~~~~ + +.. code-block:: console + + pip install virtualenv + virtualenv + source /bin/activate + /bin/pip install gapic-google-cloud-vision-v1 + + +Windows +~~~~~~~ + +.. code-block:: console + + pip install virtualenv + virtualenv + \Scripts\activate + \Scripts\pip.exe install gapic-google-cloud-vision-v1 + + +At this point you are all set to continue. diff --git a/_vendor/gapic-google-cloud-vision-v1/docs/apis.rst b/_vendor/gapic-google-cloud-vision-v1/docs/apis.rst new file mode 100644 index 000000000000..32638040956b --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/docs/apis.rst @@ -0,0 +1,19 @@ +API Reference +============= + +APIs +---- + +.. autosummary:: + :toctree: + + google.cloud.gapic.vision.v1.image_annotator_client + + +API types +~~~~~~~~~ + +.. 
autosummary:: + :toctree: + + google.cloud.gapic.vision.v1.enums diff --git a/_vendor/gapic-google-cloud-vision-v1/docs/conf.py b/_vendor/gapic-google-cloud-vision-v1/docs/conf.py new file mode 100644 index 000000000000..b3ba18c2f1e4 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/docs/conf.py @@ -0,0 +1,311 @@ +# -*- coding: utf-8 -*- +# +# gapic-google-cloud-vision-v1 documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('..')) + +__version__ = '0.15.0' + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.coverage', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', +] + +# autodoc/autosummary flags +autoclass_content = 'both' +autodoc_default_flags = ['members'] +autosummary_generate = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. 
+#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'gapic-google-cloud-vision-v1' +copyright = u'2016, Google' +author = u'Google APIs' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = '.'.join(release.split('.')[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. 
+#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = [] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. 
+#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'gapic-google-cloud-vision-v1-doc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). 
+ #'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + #'preamble': '', + + # Latex figure (float) alignment + #'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'gapic-google-cloud-vision-v1.tex', + u'gapic-google-cloud-vision-v1 Documentation', author, 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, 'gapic-google-cloud-vision-v1', + u'gapic-google-cloud-vision-v1 Documentation', [author], 1)] + +# If true, show URL addresses after external links. +#man_show_urls = False + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. 
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'gapic-google-cloud-vision-v1', + u'gapic-google-cloud-vision-v1 Documentation', author, + 'gapic-google-cloud-vision-v1', + 'GAPIC library for the {metadata.shortName} v1 service', 'APIs'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @etailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + 'python': ('http://python.readthedocs.org/en/latest/', None), + 'gax': ('https://gax-python.readthedocs.org/en/latest/', None), +} + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/_vendor/gapic-google-cloud-vision-v1/docs/index.rst b/_vendor/gapic-google-cloud-vision-v1/docs/index.rst new file mode 100644 index 000000000000..29b913c021ca --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/docs/index.rst @@ -0,0 +1,41 @@ +.. gapic-google-cloud-vision-v1 sphinx documentation master file + + +GAPIC library for the Google Cloud Vision API +============================================================================================================= + +This is the API documentation for ``gapic-google-cloud-vision-v1``. 
+ +gapic-google-cloud-vision-v1 uses google-gax_ (Google API extensions) to provide an +easy-to-use client library for the `Google Cloud Vision API`_ (v1) defined in the googleapis_ git repository + + +.. _`google-gax`: https://github.com/googleapis/gax-python +.. _`googleapis`: https://github.com/googleapis/googleapis/tree/master/google/google/cloud/vision/v1 +.. _`Google Cloud Vision API`: https://developers.google.com/apis-explorer/?hl=en_US#p/vision/v1/ + + +APIs +---- + +.. autosummary:: + + google.cloud.gapic.vision.v1.image_annotator_client + + +Contents +-------- + +.. toctree:: + + self + starting + apis + + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/_vendor/gapic-google-cloud-vision-v1/docs/starting.rst b/_vendor/gapic-google-cloud-vision-v1/docs/starting.rst new file mode 100644 index 000000000000..e24c61080bd7 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/docs/starting.rst @@ -0,0 +1,78 @@ +Getting started +=============== + +gapic-google-cloud-vision-v1 will allow you to connect to the `Google Cloud Vision API`_ and access all its methods. In order to achieve this, you need to set up authentication as well as install the library locally. + +.. _`Google Cloud Vision API`: https://developers.google.com/apis-explorer/?hl=en_US#p/vision/v1/ + + +Installation +------------ + + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +~~~~~~~~~~ + +.. 
code-block:: console + + pip install virtualenv + virtualenv + source /bin/activate + /bin/pip install gapic-google-cloud-vision-v1 + +Windows +~~~~~~~ + +.. code-block:: console + + pip install virtualenv + virtualenv + \Scripts\activate + \Scripts\pip.exe install gapic-google-cloud-vision-v1 + + +Using the API +------------- + + +Authentication +~~~~~~~~~~~~~~ + +To authenticate all your API calls, first install and setup the `Google Cloud SDK`_. +Once done, you can then run the following command in your terminal: + +.. code-block:: console + + $ gcloud beta auth application-default login + +or + +.. code-block:: console + + $ gcloud auth login + +Please see `gcloud beta auth application-default login`_ document for the difference between these commands. + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ +.. _gcloud beta auth application-default login: https://cloud.google.com/sdk/gcloud/reference/beta/auth/application-default/login +.. code-block:: console + +At this point you are all set to continue. + + +Examples +~~~~~~~~ + +To see example usage, please read through the :doc:`API reference `. The +documentation for each API method includes simple examples. 
diff --git a/_vendor/gapic-google-cloud-vision-v1/google/__init__.py b/_vendor/gapic-google-cloud-vision-v1/google/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/google/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/gapic-google-cloud-vision-v1/google/cloud/__init__.py b/_vendor/gapic-google-cloud-vision-v1/google/cloud/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/google/cloud/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/__init__.py b/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/__init__.py b/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/v1/__init__.py b/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/v1/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/v1/enums.py b/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/v1/enums.py new file mode 100644 index 000000000000..9a4b3c4e45a8 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/google/cloud/gapic/vision/v1/enums.py @@ -0,0 +1,195 @@ +# Copyright 2016 
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""


class TextAnnotation(object):
    class DetectedBreak(object):
        class BreakType(object):
            """
            Enum to denote the type of break found. New line, space etc.

            Attributes:
              UNKNOWN (int): Unknown break label type.
              SPACE (int): Regular space.
              SURE_SPACE (int): Sure space (very wide).
              EOL_SURE_SPACE (int): Line-wrapping break.
              HYPHEN (int): End-line hyphen that is not present in text; does
                not co-occur with SPACE, LEADER_SPACE, or LINE_BREAK.
              LINE_BREAK (int): Line break that ends a paragraph.
            """
            # FIX(review): the generated docstring interleaved HYPHEN's
            # continuation line into the LINE_BREAK entry; the attribute
            # descriptions above are re-attributed per the v1 proto comments.
            UNKNOWN = 0
            SPACE = 1
            SURE_SPACE = 2
            EOL_SURE_SPACE = 3
            HYPHEN = 4
            LINE_BREAK = 5


class Block(object):
    class BlockType(object):
        """
        Type of a block (text, image etc) as identified by OCR.

        Attributes:
          UNKNOWN (int): Unknown block type.
          TEXT (int): Regular text block.
          TABLE (int): Table block.
          PICTURE (int): Image block.
          RULER (int): Horizontal/vertical line box.
          BARCODE (int): Barcode block.
        """
        UNKNOWN = 0
        TEXT = 1
        TABLE = 2
        PICTURE = 3
        RULER = 4
        BARCODE = 5


class Likelihood(object):
    """
    A bucketized representation of likelihood, which is intended to give clients
    highly stable results across model upgrades.

    Attributes:
      UNKNOWN (int): Unknown likelihood.
      VERY_UNLIKELY (int): It is very unlikely that the image belongs to the
        specified vertical.
      UNLIKELY (int): It is unlikely that the image belongs to the specified
        vertical.
      POSSIBLE (int): It is possible that the image belongs to the specified
        vertical.
      LIKELY (int): It is likely that the image belongs to the specified
        vertical.
      VERY_LIKELY (int): It is very likely that the image belongs to the
        specified vertical.
    """
    UNKNOWN = 0
    VERY_UNLIKELY = 1
    UNLIKELY = 2
    POSSIBLE = 3
    LIKELY = 4
    VERY_LIKELY = 5


class Feature(object):
    class Type(object):
        """
        Type of image feature.

        Attributes:
          TYPE_UNSPECIFIED (int): Unspecified feature type.
          FACE_DETECTION (int): Run face detection.
          LANDMARK_DETECTION (int): Run landmark detection.
          LOGO_DETECTION (int): Run logo detection.
          LABEL_DETECTION (int): Run label detection.
          TEXT_DETECTION (int): Run OCR.
          DOCUMENT_TEXT_DETECTION (int): Run dense text document OCR. Takes
            precedence when both DOCUMENT_TEXT_DETECTION and TEXT_DETECTION
            are present.
          SAFE_SEARCH_DETECTION (int): Run computer vision models to compute
            image safe-search properties.
          IMAGE_PROPERTIES (int): Compute a set of image properties, such as
            the image's dominant colors.
          CROP_HINTS (int): Run crop hints.
          WEB_ANNOTATION (int): Run web annotation.
        """
        # NOTE: values mirror the proto enum and are intentionally
        # non-contiguous (8 is unassigned; DOCUMENT_TEXT_DETECTION is 11).
        TYPE_UNSPECIFIED = 0
        FACE_DETECTION = 1
        LANDMARK_DETECTION = 2
        LOGO_DETECTION = 3
        LABEL_DETECTION = 4
        TEXT_DETECTION = 5
        DOCUMENT_TEXT_DETECTION = 11
        SAFE_SEARCH_DETECTION = 6
        IMAGE_PROPERTIES = 7
        CROP_HINTS = 9
        WEB_ANNOTATION = 10


class FaceAnnotation(object):
    class Landmark(object):
        class Type(object):
            """
            Face landmark (feature) type.
            Left and right are defined from the vantage of the viewer of the image
            without considering mirror projections typical of photos. So, ``LEFT_EYE``,
            typically, is the person's right eye.

            Attributes:
              UNKNOWN_LANDMARK (int): Unknown face landmark detected. Should not be filled.
              LEFT_EYE (int): Left eye.
              RIGHT_EYE (int): Right eye.
              LEFT_OF_LEFT_EYEBROW (int): Left of left eyebrow.
              RIGHT_OF_LEFT_EYEBROW (int): Right of left eyebrow.
              LEFT_OF_RIGHT_EYEBROW (int): Left of right eyebrow.
              RIGHT_OF_RIGHT_EYEBROW (int): Right of right eyebrow.
              MIDPOINT_BETWEEN_EYES (int): Midpoint between eyes.
              NOSE_TIP (int): Nose tip.
              UPPER_LIP (int): Upper lip.
              LOWER_LIP (int): Lower lip.
              MOUTH_LEFT (int): Mouth left.
              MOUTH_RIGHT (int): Mouth right.
              MOUTH_CENTER (int): Mouth center.
              NOSE_BOTTOM_RIGHT (int): Nose, bottom right.
              NOSE_BOTTOM_LEFT (int): Nose, bottom left.
              NOSE_BOTTOM_CENTER (int): Nose, bottom center.
              LEFT_EYE_TOP_BOUNDARY (int): Left eye, top boundary.
              LEFT_EYE_RIGHT_CORNER (int): Left eye, right corner.
              LEFT_EYE_BOTTOM_BOUNDARY (int): Left eye, bottom boundary.
              LEFT_EYE_LEFT_CORNER (int): Left eye, left corner.
              RIGHT_EYE_TOP_BOUNDARY (int): Right eye, top boundary.
              RIGHT_EYE_RIGHT_CORNER (int): Right eye, right corner.
              RIGHT_EYE_BOTTOM_BOUNDARY (int): Right eye, bottom boundary.
              RIGHT_EYE_LEFT_CORNER (int): Right eye, left corner.
              LEFT_EYEBROW_UPPER_MIDPOINT (int): Left eyebrow, upper midpoint.
              RIGHT_EYEBROW_UPPER_MIDPOINT (int): Right eyebrow, upper midpoint.
              LEFT_EAR_TRAGION (int): Left ear tragion.
              RIGHT_EAR_TRAGION (int): Right ear tragion.
              LEFT_EYE_PUPIL (int): Left eye pupil.
              RIGHT_EYE_PUPIL (int): Right eye pupil.
              FOREHEAD_GLABELLA (int): Forehead glabella.
              CHIN_GNATHION (int): Chin gnathion.
              CHIN_LEFT_GONION (int): Chin left gonion.
              CHIN_RIGHT_GONION (int): Chin right gonion.
            """
            UNKNOWN_LANDMARK = 0
            LEFT_EYE = 1
            RIGHT_EYE = 2
            LEFT_OF_LEFT_EYEBROW = 3
            RIGHT_OF_LEFT_EYEBROW = 4
            LEFT_OF_RIGHT_EYEBROW = 5
            RIGHT_OF_RIGHT_EYEBROW = 6
            MIDPOINT_BETWEEN_EYES = 7
            NOSE_TIP = 8
            UPPER_LIP = 9
            LOWER_LIP = 10
            MOUTH_LEFT = 11
            MOUTH_RIGHT = 12
            MOUTH_CENTER = 13
            NOSE_BOTTOM_RIGHT = 14
            NOSE_BOTTOM_LEFT = 15
            NOSE_BOTTOM_CENTER = 16
            LEFT_EYE_TOP_BOUNDARY = 17
            LEFT_EYE_RIGHT_CORNER = 18
            LEFT_EYE_BOTTOM_BOUNDARY = 19
            LEFT_EYE_LEFT_CORNER = 20
            RIGHT_EYE_TOP_BOUNDARY = 21
            RIGHT_EYE_RIGHT_CORNER = 22
            RIGHT_EYE_BOTTOM_BOUNDARY = 23
            RIGHT_EYE_LEFT_CORNER = 24
            LEFT_EYEBROW_UPPER_MIDPOINT = 25
            RIGHT_EYEBROW_UPPER_MIDPOINT = 26
            LEFT_EAR_TRAGION = 27
            RIGHT_EAR_TRAGION = 28
            LEFT_EYE_PUPIL = 29
            RIGHT_EYE_PUPIL = 30
            FOREHEAD_GLABELLA = 31
            CHIN_GNATHION = 32
            CHIN_LEFT_GONION = 33
            CHIN_RIGHT_GONION = 34
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.cloud.vision.v1 ImageAnnotator API."""

import json
import os
import pkg_resources
import platform

from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax

from google.cloud.gapic.vision.v1 import enums
from google.cloud.proto.vision.v1 import image_annotator_pb2

# NOTE(review): ``os``, ``path_template``, ``google.gax`` and ``enums`` are not
# referenced in the visible code; they appear to be generator boilerplate kept
# for the refresh process — confirm before removing.


class ImageAnnotatorClient(object):
    """
    Service that performs Google Cloud Vision API detection tasks over client
    images, such as face, landmark, logo, label, and text detection. The
    ImageAnnotator service returns detected entities from the images.
    """

    SERVICE_ADDRESS = 'vision.googleapis.com'
    """The default address of the service."""

    DEFAULT_SERVICE_PORT = 443
    """The default port of the service."""

    # Code-generator tag reported in the x-goog-api-client request header.
    _CODE_GEN_NAME_VERSION = 'gapic/0.1.0'

    # Resolved at import time from the installed google-gax distribution;
    # importing this module therefore requires google-gax to be installed.
    _GAX_VERSION = pkg_resources.get_distribution('google-gax').version

    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )

    def __init__(self,
                 service_path=SERVICE_ADDRESS,
                 port=DEFAULT_SERVICE_PORT,
                 channel=None,
                 credentials=None,
                 ssl_credentials=None,
                 scopes=None,
                 client_config=None,
                 app_name='gax',
                 app_version=_GAX_VERSION):
        """Constructor.

        Args:
          service_path (string): The domain name of the API remote host.
          port (int): The port on which to connect to the remote host.
          channel (:class:`grpc.Channel`): A ``Channel`` instance through
            which to make calls.
          credentials (object): The authorization credentials to attach to
            requests. These credentials identify this application to the
            service.
          ssl_credentials (:class:`grpc.ChannelCredentials`): A
            ``ChannelCredentials`` instance for use with an SSL-enabled
            channel.
          scopes (list[string]): A list of OAuth2 scopes to attach to requests.
          client_config (dict):
            A dictionary for call options for each method. See
            :func:`google.gax.construct_settings` for the structure of
            this data. Falls back to the default config if not specified
            or the specified config is missing data points.
          app_name (string): The codename of the calling service.
          app_version (string): The version of the calling service.

        Returns:
          A ImageAnnotatorClient object.
        """
        if scopes is None:
            scopes = self._ALL_SCOPES
        if client_config is None:
            client_config = {}
        # Identify the calling application and toolchain versions to the API
        # frontend via the standard x-goog-api-client metadata header.
        goog_api_client = '{}/{} {} gax/{} python/{}'.format(
            app_name, app_version, self._CODE_GEN_NAME_VERSION,
            self._GAX_VERSION, platform.python_version())
        metadata = [('x-goog-api-client', goog_api_client)]
        # Default retry/timeout settings ship as a JSON resource alongside
        # this module; caller-supplied ``client_config`` entries are merged
        # on top of those defaults by construct_settings.
        default_client_config = json.loads(
            pkg_resources.resource_string(
                __name__, 'image_annotator_client_config.json').decode())
        defaults = api_callable.construct_settings(
            'google.cloud.vision.v1.ImageAnnotator',
            default_client_config,
            client_config,
            config.STATUS_CODE_NAMES,
            kwargs={'metadata': metadata})
        # Create the gRPC stub, either over the caller-supplied channel or a
        # new secure channel built from the service address/port/credentials.
        self.image_annotator_stub = config.create_stub(
            image_annotator_pb2.ImageAnnotatorStub,
            channel=channel,
            service_path=service_path,
            service_port=port,
            credentials=credentials,
            scopes=scopes,
            ssl_credentials=ssl_credentials)

        # Wrap the raw stub method with retry/timeout settings resolved above.
        self._batch_annotate_images = api_callable.create_api_call(
            self.image_annotator_stub.BatchAnnotateImages,
            settings=defaults['batch_annotate_images'])

    # Service calls
    def batch_annotate_images(self, requests, options=None):
        """
        Run image detection and annotation for a batch of images.

        Example:
          >>> from google.cloud.gapic.vision.v1 import image_annotator_client
          >>> from google.cloud.proto.vision.v1 import image_annotator_pb2
          >>> api = image_annotator_client.ImageAnnotatorClient()
          >>> requests = []
          >>> response = api.batch_annotate_images(requests)

        Args:
          requests (list[:class:`google.cloud.proto.vision.v1.image_annotator_pb2.AnnotateImageRequest`]): Individual image annotation requests for this batch.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          A :class:`google.cloud.proto.vision.v1.image_annotator_pb2.BatchAnnotateImagesResponse` instance.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        request = image_annotator_pb2.BatchAnnotateImagesRequest(
            requests=requests)
        return self._batch_annotate_images(request, options)
"retry_params_name": "default" + } + } + } + } +} diff --git a/_vendor/gapic-google-cloud-vision-v1/requirements.txt b/_vendor/gapic-google-cloud-vision-v1/requirements.txt new file mode 100644 index 000000000000..78d0ea1fae8f --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/requirements.txt @@ -0,0 +1,4 @@ +googleapis-common-protos>=1.5.0, <2.0dev +google-gax>=0.15.1, <0.16dev +proto-google-cloud-vision-v1>=0.15.0, <0.16dev +oauth2client>=2.0.0, <4.0dev \ No newline at end of file diff --git a/_vendor/gapic-google-cloud-vision-v1/setup.py b/_vendor/gapic-google-cloud-vision-v1/setup.py new file mode 100644 index 000000000000..18517cb60c31 --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/setup.py @@ -0,0 +1,46 @@ +"""A setup module for the GAPIC Google Cloud Vision API library. + +See: +https://packaging.python.org/en/latest/distributing.html +https://github.com/pypa/sampleproject +""" + +from setuptools import setup, find_packages +import sys + +install_requires = [ + 'googleapis-common-protos>=1.5.0, <2.0dev', + 'google-gax>=0.15.1, <0.16dev', + 'proto-google-cloud-vision-v1>=0.15.0, <0.16dev', + 'oauth2client>=2.0.0, <4.0dev', +] + +setup( + name='gapic-google-cloud-vision-v1', + version='0.15.0', + author='Google Inc', + author_email='googleapis-packages@google.com', + classifiers=[ + 'Intended Audience :: Developers', + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: Implementation :: CPython', + ], + description='GAPIC library for the Google Cloud Vision API', + include_package_data=True, + long_description=open('README.rst').read(), + install_requires=install_requires, + 
license='Apache-2.0', + packages=find_packages(), + namespace_packages=[ + 'google', 'google.cloud', 'google.cloud.gapic', + 'google.cloud.gapic.vision' + ], + url='https://github.com/googleapis/googleapis') diff --git a/_vendor/gapic-google-cloud-vision-v1/tox.ini b/_vendor/gapic-google-cloud-vision-v1/tox.ini new file mode 100644 index 000000000000..24e3dc404afc --- /dev/null +++ b/_vendor/gapic-google-cloud-vision-v1/tox.ini @@ -0,0 +1,34 @@ +[tox] +envlist = docs +skip_missing_interpreters=True + +[testenv:docs] +basepython = python2.7 +commands = + python -c "import shutil; shutil.rmtree('docs/_build', ignore_errors=True)" + python -c "import shutil; shutil.rmtree('docs/generated', ignore_errors=True)" + sphinx-build -W -b html -d docs/_build/doctrees docs docs/_build/html +deps = + Sphinx + sphinx_rtd_theme + +[testenv:upload-package] +basepython = python2.7 +commands = + rm -rf grpc_google_pubsub_v1.egg-info/ dist/ setup.cfg + python -c 'import os; os.rename("MANIFEST.in", "MANIFEST.in.orig")' + python -c 'open("MANIFEST.in", "w").write(open("MANIFEST.in.orig").read() + "\nglobal-exclude *_pb2.py*\n")' + python setup.py sdist register upload + python -c 'import os; os.rename("MANIFEST.in.orig", "MANIFEST.in")' + +[testenv:upload-docs] +basepython = python2.7 +commands = + python -c "import shutil; shutil.rmtree('docs/_build', ignore_errors=True)" + python -c "import shutil; shutil.rmtree('docs/generated', ignore_errors=True)" + sphinx-build -W -b html -d docs/_build/doctrees docs docs/_build/html + python setup.py upload_docs --upload-dir docs/_build/html +deps = + Sphinx + sphinx_rtd_theme + sphinx-pypi-upload diff --git a/_vendor/proto-google-cloud-vision-v1/LICENSE b/_vendor/proto-google-cloud-vision-v1/LICENSE new file mode 100644 index 000000000000..724a8807144b --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/_vendor/proto-google-cloud-vision-v1/MANIFEST.in b/_vendor/proto-google-cloud-vision-v1/MANIFEST.in new file mode 100644 index 000000000000..3223937deeaa --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/MANIFEST.in @@ -0,0 +1,3 @@ +include README.rst LICENSE +global-exclude *.py[co] +global-exclude __pycache__ diff --git a/_vendor/proto-google-cloud-vision-v1/PUBLISHING.rst b/_vendor/proto-google-cloud-vision-v1/PUBLISHING.rst new file mode 100644 index 000000000000..18b1ef31ebdf --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/PUBLISHING.rst @@ -0,0 +1,27 @@ +PUBLISHING +---------- + +Note: This folder has been generated by the gen-api-package tool. + +The instructions assumes that no changes have been made to the folder and its +contents since it was created. 
+ +PREREQUISITES +------------- + +- Python must be installed. + + +TO PUBLISH +---------- + +- Make sure you have `an account`_ on pypi_. +- Publish your package as described in `Packaging and Distributing Projects`_. + +E.g, + + $ python setup.py sdist register upload + +.. _`Packaging and Distributing projects`: https://packaging.python.org/en/latest/distributing.html#uploading-your-project-to-pypi +.. _`an account`: https://pypi.python.org/pypi?%3Aaction=register_form +.. _pypi: http://pypi.python.org diff --git a/_vendor/proto-google-cloud-vision-v1/README.rst b/_vendor/proto-google-cloud-vision-v1/README.rst new file mode 100644 index 000000000000..3a50ece42ae6 --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/README.rst @@ -0,0 +1,5 @@ +gRPC library for Google Cloud Vision API + +proto-google-cloud-vision-v1 is the IDL-derived library for the vision (v1) service in the googleapis_ repository. + +.. _`googleapis`: https://github.com/googleapis/googleapis/tree/master/google/cloud/vision/v1 diff --git a/_vendor/proto-google-cloud-vision-v1/google/__init__.py b/_vendor/proto-google-cloud-vision-v1/google/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/google/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/proto-google-cloud-vision-v1/google/cloud/__init__.py b/_vendor/proto-google-cloud-vision-v1/google/cloud/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/google/cloud/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/__init__.py b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/__init__.py @@ -0,0 +1 @@ 
+__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/__init__.py b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/__init__.py b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/__init__.py new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/__init__.py @@ -0,0 +1 @@ + diff --git a/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/geometry_pb2.py b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/geometry_pb2.py new file mode 100644 index 000000000000..936e4ec361aa --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/geometry_pb2.py @@ -0,0 +1,181 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/proto/vision/v1/geometry.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/proto/vision/v1/geometry.proto', + package='google.cloud.vision.v1', + syntax='proto3', + serialized_pb=_b('\n+google/cloud/proto/vision/v1/geometry.proto\x12\x16google.cloud.vision.v1\"\x1e\n\x06Vertex\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"@\n\x0c\x42oundingPoly\x12\x30\n\x08vertices\x18\x01 \x03(\x0b\x32\x1e.google.cloud.vision.v1.Vertex\"+\n\x08Position\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\x42n\n\x1a\x63om.google.cloud.vision.v1B\rGeometryProtoP\x01Z\n\x10\x66\x64_bounding_poly\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x42\n\tlandmarks\x18\x03 \x03(\x0b\x32/.google.cloud.vision.v1.FaceAnnotation.Landmark\x12\x12\n\nroll_angle\x18\x04 \x01(\x02\x12\x11\n\tpan_angle\x18\x05 \x01(\x02\x12\x12\n\ntilt_angle\x18\x06 \x01(\x02\x12\x1c\n\x14\x64\x65tection_confidence\x18\x07 \x01(\x02\x12\x1e\n\x16landmarking_confidence\x18\x08 \x01(\x02\x12:\n\x0ejoy_likelihood\x18\t \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12=\n\x11sorrow_likelihood\x18\n \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12<\n\x10\x61nger_likelihood\x18\x0b \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12?\n\x13surprise_likelihood\x18\x0c \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x44\n\x18under_exposed_likelihood\x18\r \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12>\n\x12\x62lurred_likelihood\x18\x0e 
\x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12?\n\x13headwear_likelihood\x18\x0f \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x1a\xb9\x07\n\x08Landmark\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32\x34.google.cloud.vision.v1.FaceAnnotation.Landmark.Type\x12\x32\n\x08position\x18\x04 \x01(\x0b\x32 .google.cloud.vision.v1.Position\"\xb4\x06\n\x04Type\x12\x14\n\x10UNKNOWN_LANDMARK\x10\x00\x12\x0c\n\x08LEFT_EYE\x10\x01\x12\r\n\tRIGHT_EYE\x10\x02\x12\x18\n\x14LEFT_OF_LEFT_EYEBROW\x10\x03\x12\x19\n\x15RIGHT_OF_LEFT_EYEBROW\x10\x04\x12\x19\n\x15LEFT_OF_RIGHT_EYEBROW\x10\x05\x12\x1a\n\x16RIGHT_OF_RIGHT_EYEBROW\x10\x06\x12\x19\n\x15MIDPOINT_BETWEEN_EYES\x10\x07\x12\x0c\n\x08NOSE_TIP\x10\x08\x12\r\n\tUPPER_LIP\x10\t\x12\r\n\tLOWER_LIP\x10\n\x12\x0e\n\nMOUTH_LEFT\x10\x0b\x12\x0f\n\x0bMOUTH_RIGHT\x10\x0c\x12\x10\n\x0cMOUTH_CENTER\x10\r\x12\x15\n\x11NOSE_BOTTOM_RIGHT\x10\x0e\x12\x14\n\x10NOSE_BOTTOM_LEFT\x10\x0f\x12\x16\n\x12NOSE_BOTTOM_CENTER\x10\x10\x12\x19\n\x15LEFT_EYE_TOP_BOUNDARY\x10\x11\x12\x19\n\x15LEFT_EYE_RIGHT_CORNER\x10\x12\x12\x1c\n\x18LEFT_EYE_BOTTOM_BOUNDARY\x10\x13\x12\x18\n\x14LEFT_EYE_LEFT_CORNER\x10\x14\x12\x1a\n\x16RIGHT_EYE_TOP_BOUNDARY\x10\x15\x12\x1a\n\x16RIGHT_EYE_RIGHT_CORNER\x10\x16\x12\x1d\n\x19RIGHT_EYE_BOTTOM_BOUNDARY\x10\x17\x12\x19\n\x15RIGHT_EYE_LEFT_CORNER\x10\x18\x12\x1f\n\x1bLEFT_EYEBROW_UPPER_MIDPOINT\x10\x19\x12 \n\x1cRIGHT_EYEBROW_UPPER_MIDPOINT\x10\x1a\x12\x14\n\x10LEFT_EAR_TRAGION\x10\x1b\x12\x15\n\x11RIGHT_EAR_TRAGION\x10\x1c\x12\x12\n\x0eLEFT_EYE_PUPIL\x10\x1d\x12\x13\n\x0fRIGHT_EYE_PUPIL\x10\x1e\x12\x15\n\x11\x46OREHEAD_GLABELLA\x10\x1f\x12\x11\n\rCHIN_GNATHION\x10 \x12\x14\n\x10\x43HIN_LEFT_GONION\x10!\x12\x15\n\x11\x43HIN_RIGHT_GONION\x10\"\"4\n\x0cLocationInfo\x12$\n\x07lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\"\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xa7\x02\n\x10\x45ntityAnnotation\x12\x0b\n\x03mid\x18\x01 \x01(\t\x12\x0e\n\x06locale\x18\x02 
\x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\x12\x12\n\ntopicality\x18\x06 \x01(\x02\x12;\n\rbounding_poly\x18\x07 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x37\n\tlocations\x18\x08 \x03(\x0b\x32$.google.cloud.vision.v1.LocationInfo\x12\x34\n\nproperties\x18\t \x03(\x0b\x32 .google.cloud.vision.v1.Property\"\xe7\x01\n\x14SafeSearchAnnotation\x12\x31\n\x05\x61\x64ult\x18\x01 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x31\n\x05spoof\x18\x02 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x33\n\x07medical\x18\x03 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\x12\x34\n\x08violence\x18\x04 \x01(\x0e\x32\".google.cloud.vision.v1.Likelihood\"a\n\x0bLatLongRect\x12(\n\x0bmin_lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\x12(\n\x0bmax_lat_lng\x18\x02 \x01(\x0b\x32\x13.google.type.LatLng\"U\n\tColorInfo\x12!\n\x05\x63olor\x18\x01 \x01(\x0b\x32\x12.google.type.Color\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x16\n\x0epixel_fraction\x18\x03 \x01(\x02\"M\n\x18\x44ominantColorsAnnotation\x12\x31\n\x06\x63olors\x18\x01 \x03(\x0b\x32!.google.cloud.vision.v1.ColorInfo\"\\\n\x0fImageProperties\x12I\n\x0f\x64ominant_colors\x18\x01 \x01(\x0b\x32\x30.google.cloud.vision.v1.DominantColorsAnnotation\"x\n\x08\x43ropHint\x12;\n\rbounding_poly\x18\x01 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x1b\n\x13importance_fraction\x18\x03 \x01(\x02\"K\n\x13\x43ropHintsAnnotation\x12\x34\n\ncrop_hints\x18\x01 \x03(\x0b\x32 .google.cloud.vision.v1.CropHint\"(\n\x0f\x43ropHintsParams\x12\x15\n\raspect_ratios\x18\x01 \x03(\x02\"\xa6\x01\n\x0cImageContext\x12:\n\rlat_long_rect\x18\x01 \x01(\x0b\x32#.google.cloud.vision.v1.LatLongRect\x12\x16\n\x0elanguage_hints\x18\x02 \x03(\t\x12\x42\n\x11\x63rop_hints_params\x18\x04 \x01(\x0b\x32\'.google.cloud.vision.v1.CropHintsParams\"\xb4\x01\n\x14\x41nnotateImageRequest\x12,\n\x05image\x18\x01 
\x01(\x0b\x32\x1d.google.cloud.vision.v1.Image\x12\x31\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0b\x32\x1f.google.cloud.vision.v1.Feature\x12;\n\rimage_context\x18\x03 \x01(\x0b\x32$.google.cloud.vision.v1.ImageContext\"\xfe\x05\n\x15\x41nnotateImageResponse\x12@\n\x10\x66\x61\x63\x65_annotations\x18\x01 \x03(\x0b\x32&.google.cloud.vision.v1.FaceAnnotation\x12\x46\n\x14landmark_annotations\x18\x02 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x42\n\x10logo_annotations\x18\x03 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x43\n\x11label_annotations\x18\x04 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x42\n\x10text_annotations\x18\x05 \x03(\x0b\x32(.google.cloud.vision.v1.EntityAnnotation\x12\x44\n\x14\x66ull_text_annotation\x18\x0c \x01(\x0b\x32&.google.cloud.vision.v1.TextAnnotation\x12L\n\x16safe_search_annotation\x18\x06 \x01(\x0b\x32,.google.cloud.vision.v1.SafeSearchAnnotation\x12L\n\x1bimage_properties_annotation\x18\x08 \x01(\x0b\x32\'.google.cloud.vision.v1.ImageProperties\x12J\n\x15\x63rop_hints_annotation\x18\x0b \x01(\x0b\x32+.google.cloud.vision.v1.CropHintsAnnotation\x12=\n\x0eweb_annotation\x18\r \x01(\x0b\x32%.google.cloud.vision.v1.WebAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"\\\n\x1a\x42\x61tchAnnotateImagesRequest\x12>\n\x08requests\x18\x01 \x03(\x0b\x32,.google.cloud.vision.v1.AnnotateImageRequest\"_\n\x1b\x42\x61tchAnnotateImagesResponse\x12@\n\tresponses\x18\x01 
\x03(\x0b\x32-.google.cloud.vision.v1.AnnotateImageResponse*e\n\nLikelihood\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xb1\x01\n\x0eImageAnnotator\x12\x9e\x01\n\x13\x42\x61tchAnnotateImages\x12\x32.google.cloud.vision.v1.BatchAnnotateImagesRequest\x1a\x33.google.cloud.vision.v1.BatchAnnotateImagesResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\"\x13/v1/images:annotate:\x01*Bt\n\x1a\x63om.google.cloud.vision.v1B\x13ImageAnnotatorProtoP\x01Z=0.15.0.""" + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + def BatchAnnotateImages(self, request, context): + """Run image detection and annotation for a batch of images. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + + class BetaImageAnnotatorStub(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Run image detection and annotation for a batch of images. + """ + raise NotImplementedError() + BatchAnnotateImages.future = None + + + def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. 
+ + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString, + } + response_serializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString, + } + method_implementations = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString, + } + response_deserializers = { + ('google.cloud.vision.v1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString, + } + cardinalities = { + 'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1.ImageAnnotator', cardinalities, options=stub_options) +except ImportError: + pass +# @@protoc_insertion_point(module_scope) diff --git a/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/image_annotator_pb2_grpc.py b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/image_annotator_pb2_grpc.py new file mode 100644 index 000000000000..65b8cfd58b77 --- /dev/null +++ b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/image_annotator_pb2_grpc.py @@ -0,0 +1,52 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import google.cloud.proto.vision.v1.image_annotator_pb2 as google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2 +import google.cloud.proto.vision.v1.image_annotator_pb2 as google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2 + + +class ImageAnnotatorStub(object): + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. 
+ """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.BatchAnnotateImages = channel.unary_unary( + '/google.cloud.vision.v1.ImageAnnotator/BatchAnnotateImages', + request_serializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesResponse.FromString, + ) + + +class ImageAnnotatorServicer(object): + """Service that performs Google Cloud Vision API detection tasks over client + images, such as face, landmark, logo, label, and text detection. The + ImageAnnotator service returns detected entities from the images. + """ + + def BatchAnnotateImages(self, request, context): + """Run image detection and annotation for a batch of images. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ImageAnnotatorServicer_to_server(servicer, server): + rpc_method_handlers = { + 'BatchAnnotateImages': grpc.unary_unary_rpc_method_handler( + servicer.BatchAnnotateImages, + request_deserializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesRequest.FromString, + response_serializer=google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_image__annotator__pb2.BatchAnnotateImagesResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.cloud.vision.v1.ImageAnnotator', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/text_annotation_pb2.py b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/text_annotation_pb2.py new file mode 100644 index 000000000000..0dada8f4664a --- /dev/null +++ 
b/_vendor/proto-google-cloud-vision-v1/google/cloud/proto/vision/v1/text_annotation_pb2.py @@ -0,0 +1,606 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/proto/vision/v1/text_annotation.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.proto.vision.v1 import geometry_pb2 as google_dot_cloud_dot_proto_dot_vision_dot_v1_dot_geometry__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/proto/vision/v1/text_annotation.proto', + package='google.cloud.vision.v1', + syntax='proto3', + serialized_pb=_b('\n2google/cloud/proto/vision/v1/text_annotation.proto\x12\x16google.cloud.vision.v1\x1a\x1cgoogle/api/annotations.proto\x1a+google/cloud/proto/vision/v1/geometry.proto\"\x96\x04\n\x0eTextAnnotation\x12+\n\x05pages\x18\x01 \x03(\x0b\x32\x1c.google.cloud.vision.v1.Page\x12\x0c\n\x04text\x18\x02 \x01(\t\x1a=\n\x10\x44\x65tectedLanguage\x12\x15\n\rlanguage_code\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x1a\xd5\x01\n\rDetectedBreak\x12L\n\x04type\x18\x01 \x01(\x0e\x32>.google.cloud.vision.v1.TextAnnotation.DetectedBreak.BreakType\x12\x11\n\tis_prefix\x18\x02 \x01(\x08\"c\n\tBreakType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05SPACE\x10\x01\x12\x0e\n\nSURE_SPACE\x10\x02\x12\x12\n\x0e\x45OL_SURE_SPACE\x10\x03\x12\n\n\x06HYPHEN\x10\x04\x12\x0e\n\nLINE_BREAK\x10\x05\x1a\xb1\x01\n\x0cTextProperty\x12S\n\x12\x64\x65tected_languages\x18\x01 
\x03(\x0b\x32\x37.google.cloud.vision.v1.TextAnnotation.DetectedLanguage\x12L\n\x0e\x64\x65tected_break\x18\x02 \x01(\x0b\x32\x34.google.cloud.vision.v1.TextAnnotation.DetectedBreak\"\x9b\x01\n\x04Page\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x0e\n\x06height\x18\x03 \x01(\x05\x12-\n\x06\x62locks\x18\x04 \x03(\x0b\x32\x1d.google.cloud.vision.v1.Block\"\xd2\x02\n\x05\x42lock\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x35\n\nparagraphs\x18\x03 \x03(\x0b\x32!.google.cloud.vision.v1.Paragraph\x12;\n\nblock_type\x18\x04 \x01(\x0e\x32\'.google.cloud.vision.v1.Block.BlockType\"R\n\tBlockType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04TEXT\x10\x01\x12\t\n\x05TABLE\x10\x02\x12\x0b\n\x07PICTURE\x10\x03\x12\t\n\x05RULER\x10\x04\x12\x0b\n\x07\x42\x41RCODE\x10\x05\"\xbb\x01\n\tParagraph\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12+\n\x05words\x18\x03 \x03(\x0b\x32\x1c.google.cloud.vision.v1.Word\"\xba\x01\n\x04Word\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12/\n\x07symbols\x18\x03 \x03(\x0b\x32\x1e.google.cloud.vision.v1.Symbol\"\x99\x01\n\x06Symbol\x12\x45\n\x08property\x18\x01 \x01(\x0b\x32\x33.google.cloud.vision.v1.TextAnnotation.TextProperty\x12:\n\x0c\x62ounding_box\x18\x02 \x01(\x0b\x32$.google.cloud.vision.v1.BoundingPoly\x12\x0c\n\x04text\x18\x03 \x01(\tBt\n\x1a\x63om.google.cloud.vision.v1B\x13TextAnnotationProtoP\x01Z=2.0.0, <4.0dev', + 'grpcio>=1.0.2, <2.0dev', + 'googleapis-common-protos[grpc]>=1.5.0, <2.0dev' +] + +setuptools.setup( + 
name='proto-google-cloud-vision-v1', + version='0.15.0', + author='Google Inc', + author_email='googleapis-packages@google.com', + classifiers=[ + 'Intended Audience :: Developers', + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: Implementation :: CPython', + ], + description='GRPC library for the Google Cloud Vision API', + long_description=open('README.rst').read(), + install_requires=install_requires, + license='Apache-2.0', + packages=find_packages(), + namespace_packages=['google.cloud.proto.vision', 'google.cloud.proto', 'google.cloud', 'google'], + url='https://github.com/googleapis/googleapis' +) diff --git a/docs/index.rst b/docs/index.rst index 3913c55b7ea1..254e439724a4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -158,6 +158,7 @@ vision-batch vision-client vision-color + vision-crop-hint vision-entity vision-feature vision-face diff --git a/docs/vision-crop-hint.rst b/docs/vision-crop-hint.rst new file mode 100644 index 000000000000..14be33de2761 --- /dev/null +++ b/docs/vision-crop-hint.rst @@ -0,0 +1,10 @@ +Vision Crop Hint +================ + +Crop Hint +~~~~~~~~~ + +.. automodule:: google.cloud.vision.crop_hint + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/vision-usage.rst b/docs/vision-usage.rst index 242cb86903a7..1382df80d6f5 100644 --- a/docs/vision-usage.rst +++ b/docs/vision-usage.rst @@ -98,6 +98,30 @@ You can call the detection method manually. 'github' +********** +Crop Hints +********** + +:meth:`~google.cloud.vision.image.Image.detect_crop_hints` will attempt to find +boundaries that contain interesting data which can be used to crop an image. + +.. 
code-block:: python + + >>> from google.cloud import vision + >>> client = vision.Client() + >>> image = client.image(source_uri='gs://my-test-bucket/image.jpg') + >>> crop_hints = image.detect_crop_hints(aspect_ratios=[1.3333], limit=2) + >>> first_hint = crop_hints[0] + >>> first_hint.bounds.vertices[0].x_coordinate + 77 + >>> first_hint.bounds.vertices[0].y_coordinate + 102 + >>> first_hint.confidence + 0.5 + >>> first_hint.importance_fraction + 1.22000002861 + + ************** Face Detection ************** diff --git a/system_tests/vision.py b/system_tests/vision.py index 5d5d903c6793..3068af1fd29f 100644 --- a/system_tests/vision.py +++ b/system_tests/vision.py @@ -80,6 +80,61 @@ def _pb_not_implemented_skip(self, message): self.skipTest(message) +class TestVisionClientCropHint(BaseVisionTestCase): + def setUp(self): + self.to_delete_by_case = [] + + def tearDown(self): + for value in self.to_delete_by_case: + value.delete() + + def _assert_crop_hint(self, hint): + from google.cloud.vision.crop_hint import CropHint + from google.cloud.vision.geometry import Bounds + + self.assertIsInstance(hint, CropHint) + self.assertIsInstance(hint.bounds, Bounds) + self.assertGreater(hint.bounds.vertices, 1) + self.assertIsInstance(hint.confidence, (int, float)) + self.assertIsInstance(hint.importance_fraction, float) + + def test_detect_crop_hints_content(self): + client = Config.CLIENT + with open(FACE_FILE, 'rb') as image_file: + image = client.image(content=image_file.read()) + crop_hints = image.detect_crop_hints( + aspect_ratios=[1.3333, 1.7777], limit=2) + self.assertEqual(len(crop_hints), 2) + for hint in crop_hints: + self._assert_crop_hint(hint) + + def test_detect_crop_hints_filename(self): + client = Config.CLIENT + image = client.image(filename=FACE_FILE) + crop_hints = image.detect_crop_hints( + aspect_ratios=[1.3333, 1.7777], limit=2) + self.assertEqual(len(crop_hints), 2) + for hint in crop_hints: + self._assert_crop_hint(hint) + + def 
test_detect_crop_hints_gcs(self): + bucket_name = Config.TEST_BUCKET.name + blob_name = 'faces.jpg' + blob = Config.TEST_BUCKET.blob(blob_name) + self.to_delete_by_case.append(blob) # Clean-up. + with open(FACE_FILE, 'rb') as file_obj: + blob.upload_from_file(file_obj) + + source_uri = 'gs://%s/%s' % (bucket_name, blob_name) + client = Config.CLIENT + image = client.image(source_uri=source_uri) + crop_hints = image.detect_crop_hints( + aspect_ratios=[1.3333, 1.7777], limit=2) + self.assertEqual(len(crop_hints), 2) + for hint in crop_hints: + self._assert_crop_hint(hint) + + class TestVisionClientLogo(unittest.TestCase): def setUp(self): self.to_delete_by_case = [] diff --git a/tox.ini b/tox.ini index 043c424d05a1..3d00c05b2b9c 100644 --- a/tox.ini +++ b/tox.ini @@ -16,6 +16,8 @@ deps = {toxinidir}/error_reporting {toxinidir}/resource_manager {toxinidir}/monitoring + {toxinidir}/_vendor/proto-google-cloud-vision-v1 + {toxinidir}/_vendor/gapic-google-cloud-vision-v1 {toxinidir}/vision {toxinidir}/translate {toxinidir}/speech @@ -263,6 +265,8 @@ commands = {[testing]localdeps} python {toxinidir}/system_tests/attempt_system_tests.py {posargs} deps = + {toxinidir}/_vendor/proto-google-cloud-vision-v1 + {toxinidir}/_vendor/gapic-google-cloud-vision-v1 {[testing]deps} Sphinx passenv = diff --git a/vision/google/cloud/vision/_gax.py b/vision/google/cloud/vision/_gax.py index 55f3dfd4adf2..91872dc7bfae 100644 --- a/vision/google/cloud/vision/_gax.py +++ b/vision/google/cloud/vision/_gax.py @@ -15,7 +15,7 @@ """GAX Client for interacting with the Google Cloud Vision API.""" from google.cloud.gapic.vision.v1 import image_annotator_client -from google.cloud.grpc.vision.v1 import image_annotator_pb2 +from google.cloud.proto.vision.v1 import image_annotator_pb2 from google.cloud.vision.annotations import Annotations @@ -30,7 +30,7 @@ def __init__(self, client=None): self._client = client self._annotator_client = image_annotator_client.ImageAnnotatorClient() - def annotate(self, 
images): + def annotate(self, images=None, requests_pb=None): """Annotate images through GAX. :type images: list @@ -39,18 +39,28 @@ def annotate(self, images): :class:`~google.cloud.vision.feature.Feature`. e.g. [(image, [feature_one, feature_two]),] + :type requests_pb: list + :param requests_pb: List of :class:`google.cloud.proto.vision.v1.\ + image_annotator_pb2.AnnotateImageRequest` + :rtype: list :returns: List of :class:`~google.cloud.vision.annotations.Annotations`. """ - requests = [] - for image, features in images: - gapic_features = [_to_gapic_feature(feature) - for feature in features] - gapic_image = _to_gapic_image(image) - request = image_annotator_pb2.AnnotateImageRequest( - image=gapic_image, features=gapic_features) - requests.append(request) + if any([images, requests_pb]) is False: + return [] + + if requests_pb is None: + requests = [] + for image, features in images: + gapic_features = [_to_gapic_feature(feature) + for feature in features] + gapic_image = _to_gapic_image(image) + request = image_annotator_pb2.AnnotateImageRequest( + image=gapic_image, features=gapic_features) + requests.append(request) + else: + requests = requests_pb annotator_client = self._annotator_client responses = annotator_client.batch_annotate_images(requests).responses @@ -64,7 +74,7 @@ def _to_gapic_feature(feature): :param feature: Local ``Feature`` class to be converted to gRPC ``Feature`` instance. - :rtype: :class:`~google.cloud.grpc.vision.v1.image_annotator_pb2.Feature` + :rtype: :class:`google.cloud.proto.vision.v1.image_annotator_pb2.Feature` :returns: gRPC ``Feature`` converted from :class:`~google.cloud.vision.feature.Feature`. """ @@ -79,16 +89,23 @@ def _to_gapic_image(image): :type image: :class:`~google.cloud.vision.image.Image` :param image: Local ``Image`` class to be converted to gRPC ``Image``. 
- :rtype: :class:`~google.cloud.grpc.vision.v1.image_annotator_pb2.Image` + :rtype: :class:`google.cloud.proto.vision.v1.image_annotator_pb2.Image` :returns: gRPC ``Image`` converted from :class:`~google.cloud.vision.image.Image`. """ if image.content is not None: return image_annotator_pb2.Image(content=image.content) if image.source is not None: - return image_annotator_pb2.Image( - source=image_annotator_pb2.ImageSource( - gcs_image_uri=image.source - ), - ) + if image.source.startswith('gs://'): + return image_annotator_pb2.Image( + source=image_annotator_pb2.ImageSource( + gcs_image_uri=image.source + ), + ) + elif image.source.startswith(('http://', 'https://')): + return image_annotator_pb2.Image( + source=image_annotator_pb2.ImageSource( + image_uri=image.source + ), + ) raise ValueError('No image content or source found.') diff --git a/vision/google/cloud/vision/_http.py b/vision/google/cloud/vision/_http.py index 35d9c76ef8c6..23b024f9c0e4 100644 --- a/vision/google/cloud/vision/_http.py +++ b/vision/google/cloud/vision/_http.py @@ -14,9 +14,13 @@ """HTTP Client for interacting with the Google Cloud Vision API.""" +import json + from google.cloud.vision.annotations import Annotations from google.cloud.vision.feature import Feature +from google.protobuf import json_format + class _HTTPVisionAPI(object): """Vision API for interacting with the JSON/HTTP version of Vision @@ -29,19 +33,35 @@ def __init__(self, client): self._client = client self._connection = client._connection - def annotate(self, images): + def annotate(self, images=None, requests_pb=None): """Annotate an image to discover it's attributes. :type images: list of :class:`~google.cloud.vision.image.Image` :param images: A list of ``Image``. + :rtype: list + :returns: List of :class:`~googe.cloud.vision.annotations.Annotations`. + + :type requests_pb: list + :param requests_pb: List of :class:`google.cloud.proto.vision.v1.\ + image_annotator_b2.AnnotateImageRequest`. 
+ :rtype: list :returns: List of :class:`~googe.cloud.vision.annotations.Annotations`. """ + if any([images, requests_pb]) is False: + return [] + requests = [] - for image, features in images: - requests.append(_make_request(image, features)) + if requests_pb is None: + for image, features in images: + requests.append(_make_request(image, features)) + else: + requests = [json.loads(json_format.MessageToJson(request)) + for request in requests_pb] + data = {'requests': requests} + api_response = self._connection.api_request( method='POST', path='/images:annotate', data=data) responses = api_response.get('responses') diff --git a/vision/google/cloud/vision/annotations.py b/vision/google/cloud/vision/annotations.py index 2348d2361731..e47438b43a9c 100644 --- a/vision/google/cloud/vision/annotations.py +++ b/vision/google/cloud/vision/annotations.py @@ -17,29 +17,36 @@ import six from google.cloud.vision.color import ImagePropertiesAnnotation +from google.cloud.vision.crop_hint import CropHint from google.cloud.vision.entity import EntityAnnotation from google.cloud.vision.face import Face from google.cloud.vision.safe_search import SafeSearchAnnotation +_CROP_HINTS_ANNOTATION = 'cropHintsAnnotation' _FACE_ANNOTATIONS = 'faceAnnotations' _IMAGE_PROPERTIES_ANNOTATION = 'imagePropertiesAnnotation' _SAFE_SEARCH_ANNOTATION = 'safeSearchAnnotation' _KEY_MAP = { + _CROP_HINTS_ANNOTATION: 'crop_hints', _FACE_ANNOTATIONS: 'faces', _IMAGE_PROPERTIES_ANNOTATION: 'properties', 'labelAnnotations': 'labels', 'landmarkAnnotations': 'landmarks', 'logoAnnotations': 'logos', _SAFE_SEARCH_ANNOTATION: 'safe_searches', - 'textAnnotations': 'texts' + 'textAnnotations': 'texts', } class Annotations(object): """Helper class to bundle annotation responses. + :type crop_hints: list + :param crop_hints: List of + :class:`~google.cloud.vision.crop_hint.CropHintsAnnotation`. + :type faces: list :param faces: List of :class:`~google.cloud.vision.face.Face`. 
@@ -67,8 +74,9 @@ class Annotations(object):
         :param texts: List of
                       :class:`~google.cloud.vision.entity.EntityAnnotation`.
     """
-    def __init__(self, faces=(), properties=(), labels=(), landmarks=(),
-                 logos=(), safe_searches=(), texts=()):
+    def __init__(self, crop_hints=(), faces=(), properties=(), labels=(),
+                 landmarks=(), logos=(), safe_searches=(), texts=()):
+        self.crop_hints = crop_hints
         self.faces = faces
         self.properties = properties
         self.labels = labels
@@ -99,7 +107,7 @@ def from_api_repr(cls, response):
     def from_pb(cls, response):
         """Factory: construct an instance of ``Annotations`` from protobuf.

-        :type response: :class:`~google.cloud.grpc.vision.v1.\
+        :type response: :class:`google.cloud.proto.vision.v1.\
             image_annotator_pb2.AnnotateImageResponse`
         :param response: ``AnnotateImageResponse`` from protobuf call.

@@ -113,7 +121,7 @@ def _process_image_annotations(image):
     """Helper for processing annotation types from protobuf.

-    :type image: :class:`~google.cloud.grpc.vision.v1.image_annotator_pb2.\
+    :type image: :class:`google.cloud.proto.vision.v1.image_annotator_pb2.\
         AnnotateImageResponse`
     :param image: ``AnnotateImageResponse`` from protobuf.

@@ -121,6 +129,7 @@ def _process_image_annotations(image):
     :returns: Dictionary populated with entities from response.
     """
     return {
+        'crop_hints': _make_crop_hints_from_pb(image.crop_hints_annotation),
         'faces': _make_faces_from_pb(image.face_annotations),
         'labels': _make_entity_from_pb(image.label_annotations),
         'landmarks': _make_entity_from_pb(image.landmark_annotations),
@@ -133,11 +142,25 @@
     }


+def _make_crop_hints_from_pb(crop_hints):
+    """Create list of ``CropHint`` objects from a protobuf response.
+
+    :type crop_hints: list
+    :param crop_hints: List of
+                       :class:`google.cloud.proto.vision.v1.\
+                       image_annotator_pb2.CropHintsAnnotation`
+
+    :rtype: list
+    :returns: List of ``CropHint`` objects.
+ """ + return [CropHint.from_pb(hint) for hint in crop_hints.crop_hints] + + def _make_entity_from_pb(annotations): """Create an entity from a protobuf response. :type annotations: - :class:`~google.cloud.grpc.vision.v1.image_annotator_pb2.EntityAnnotation` + :class:`google.cloud.proto.vision.v1.image_annotator_pb2.EntityAnnotation` :param annotations: protobuf instance of ``EntityAnnotation``. :rtype: list @@ -207,6 +230,10 @@ def _entity_from_response_type(feature_type, results): return ImagePropertiesAnnotation.from_api_repr(results) elif feature_type == _SAFE_SEARCH_ANNOTATION: return SafeSearchAnnotation.from_api_repr(results) + elif feature_type == _CROP_HINTS_ANNOTATION: + crop_hints = results.get('cropHints', []) + detected_objects.extend( + CropHint.from_api_repr(result) for result in crop_hints) else: for result in results: detected_objects.append(EntityAnnotation.from_api_repr(result)) diff --git a/vision/google/cloud/vision/client.py b/vision/google/cloud/vision/client.py index 1fdc00ad81a7..23aee52e6739 100644 --- a/vision/google/cloud/vision/client.py +++ b/vision/google/cloud/vision/client.py @@ -90,7 +90,7 @@ def image(self, content=None, filename=None, source_uri=None): :param filename: Filename to image. :type source_uri: str - :param source_uri: Google Cloud Storage URI of image. + :param source_uri: URL or Google Cloud Storage URI of image. :rtype: :class:`~google.cloud.vision.image.Image` :returns: Image instance with the current client attached. diff --git a/vision/google/cloud/vision/crop_hint.py b/vision/google/cloud/vision/crop_hint.py new file mode 100644 index 000000000000..4d04fbb9b075 --- /dev/null +++ b/vision/google/cloud/vision/crop_hint.py @@ -0,0 +1,92 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Representation of Vision API's crop hints.""" + +from google.cloud.vision.geometry import Bounds + + +class CropHint(object): + """Representation of a crop hint returned from the Vision API. + + :type bounds: dict + :param bounds: Dictionary of boundary information of detected entity. + + :type confidence: float + :param confidence: Confidence of this being a salient region. + + :type importance_fraction: float + :param importance_fraction: Fraction of importance of this region. + """ + def __init__(self, bounds, confidence, importance_fraction): + self._bounds = bounds + self._confidence = confidence + self._importance_fraction = importance_fraction + + @classmethod + def from_api_repr(cls, response): + """Factory: construct ``CropHint`` from Vision API response. + + :type response: dict + :param response: Dictionary response from Vision API with entity data. + + :rtype: :class:`~google.cloud.vision.crop_hint.CropHint` + :returns: Instance of ``CropHint``. + """ + bounds = Bounds.from_api_repr(response.get('boundingPoly')) + confidence = response.get('confidence', 0.0) + importance_fraction = response.get('importanceFraction', 0.0) + return cls(bounds, confidence, importance_fraction) + + @classmethod + def from_pb(cls, response): + """Factory: construct ``CropHint`` from Vision gRPC response. + + :type response: :class:`google.cloud.proto.vision.v1.\ + image_annotator_pb2.CropHint` + :param response: gRPC response from Vision API with entity data. + + :rtype: :class:`~google.cloud.vision.crop_hint.CropHint` + :returns: Instance of ``CropHint``. 
+ """ + bounds = Bounds.from_pb(response.bounding_poly) + return cls(bounds, response.confidence, response.importance_fraction) + + @property + def bounds(self): + """Bounding polygon of crop hints. + + :rtype: :class:`~google.cloud.vision.geometry.Bounds` + :returns: Instance of ``Bounds`` with populated vertices. + """ + return self._bounds + + @property + def confidence(self): + """Confidence of this being a salient region. Range [0, 1]. + + :rtype: float + :returns: float between 0 and 1, inclusive. + """ + return self._confidence + + @property + def importance_fraction(self): + """Fraction of importance of this salient region with respect to the + original image. + + :rtype: float + :returns: float + """ + return self._importance_fraction diff --git a/vision/google/cloud/vision/entity.py b/vision/google/cloud/vision/entity.py index 774c220732d6..28e63e54433c 100644 --- a/vision/google/cloud/vision/entity.py +++ b/vision/google/cloud/vision/entity.py @@ -74,7 +74,7 @@ def from_api_repr(cls, response): def from_pb(cls, response): """Factory: construct entity from Vision gRPC response. - :type response: :class:`~google.cloud.grpc.vision.v1.\ + :type response: :class:`google.cloud.proto.vision.v1.\ image_annotator_pb2.AnnotateImageResponse` :param response: gRPC response from Vision API with entity data. 
diff --git a/vision/google/cloud/vision/feature.py b/vision/google/cloud/vision/feature.py index 2870a38bf080..9d5173d28e39 100644 --- a/vision/google/cloud/vision/feature.py +++ b/vision/google/cloud/vision/feature.py @@ -21,13 +21,14 @@ class FeatureTypes(object): See: https://cloud.google.com/vision/reference/rest/v1/images/annotate#Type """ + CROP_HINTS = 'CROP_HINTS' FACE_DETECTION = 'FACE_DETECTION' + IMAGE_PROPERTIES = 'IMAGE_PROPERTIES' LANDMARK_DETECTION = 'LANDMARK_DETECTION' LOGO_DETECTION = 'LOGO_DETECTION' LABEL_DETECTION = 'LABEL_DETECTION' TEXT_DETECTION = 'TEXT_DETECTION' SAFE_SEARCH_DETECTION = 'SAFE_SEARCH_DETECTION' - IMAGE_PROPERTIES = 'IMAGE_PROPERTIES' class Feature(object): diff --git a/vision/google/cloud/vision/geometry.py b/vision/google/cloud/vision/geometry.py index 39b429a32ed8..9177566b0136 100644 --- a/vision/google/cloud/vision/geometry.py +++ b/vision/google/cloud/vision/geometry.py @@ -43,8 +43,8 @@ def from_api_repr(cls, vertices): def from_pb(cls, vertices): """Factory: construct BoundsBase instance from a protobuf response. - :type vertices: :class:`~google.cloud.grpc.vision.v1.\ - geometry_pb2.BoundingPoly` + :type vertices: :class:`google.cloud.proto.vision.v1.geometry_pb2.\ + BoundingPoly` :param vertices: List of vertices. :rtype: :class:`~google.cloud.vision.geometry.BoundsBase` or None diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py index 561339dce26a..4aed59b27619 100644 --- a/vision/google/cloud/vision/image.py +++ b/vision/google/cloud/vision/image.py @@ -17,6 +17,9 @@ from base64 import b64encode +from google.cloud.proto.vision.v1 import image_annotator_pb2 + +from google.cloud.vision._gax import _to_gapic_image from google.cloud._helpers import _to_bytes from google.cloud._helpers import _bytes_to_unicode from google.cloud.vision.feature import Feature @@ -33,7 +36,7 @@ class Image(object): :param filename: Filename to image. 
    :type source_uri: str
-    :param source_uri: Google Cloud Storage URI of image.
+    :param source_uri: URL or Google Cloud Storage URI of image.

     :type client: :class:`~google.cloud.vision.client.Client`
     :param client: Instance of Vision client.
@@ -69,12 +72,19 @@ def as_dict(self):
             return {
                 'content': _bytes_to_unicode(b64encode(self.content))
             }
-        else:
+        elif self.source.startswith('gs://'):
             return {
                 'source': {
                     'gcs_image_uri': self.source
                 }
             }
+        elif self.source.startswith(('http://', 'https://')):
+            return {
+                'source': {
+                    'image_uri': self.source
+                }
+            }
+        raise ValueError('No image content or source found.')

     @property
     def content(self):
@@ -106,6 +116,18 @@ def _detect_annotation(self, images):
         """
         return self.client._vision_api.annotate(images)

+    def _detect_annotation_from_pb(self, requests_pb=None):
+        """Helper for pre-made requests.
+
+        :type requests_pb: list
+        :param requests_pb: List of :class:`google.cloud.proto.vision.v1.\
+            image_annotator_pb2.AnnotateImageRequest`
+
+        :rtype: :class:`~google.cloud.vision.annotations.Annotations`
+        :returns: Instance of ``Annotations``.
+        """
+        return self.client._vision_api.annotate(self, requests_pb=requests_pb)
+
     def detect(self, features):
         """Detect multiple feature types.

@@ -120,6 +142,33 @@
         images = ((self, features),)
         return self._detect_annotation(images)

+    def detect_crop_hints(self, aspect_ratios=None, limit=10):
+        """Detect crop hints in image.
+
+        :type aspect_ratios: list
+        :param aspect_ratios: (Optional) List of floats e.g. 4/3 == 1.33333. A
+                              maximum of 16 aspect ratios can be given.
+
+        :type limit: int
+        :param limit: (Optional) The number of crop hints to detect.
+
+        :rtype: list
+        :returns: List of :class:`~google.cloud.vision.crop_hint.CropHint`.
+ """ + feature_type = image_annotator_pb2.Feature.CROP_HINTS + feature = image_annotator_pb2.Feature(type=feature_type, + max_results=limit) + image = _to_gapic_image(self) + crop_hints_params = image_annotator_pb2.CropHintsParams( + aspect_ratios=aspect_ratios) + image_context = image_annotator_pb2.ImageContext( + crop_hints_params=crop_hints_params) + request = image_annotator_pb2.AnnotateImageRequest( + image=image, features=[feature], image_context=image_context) + + annotations = self._detect_annotation_from_pb([request]) + return annotations[0].crop_hints + def detect_faces(self, limit=10): """Detect faces in image. diff --git a/vision/google/cloud/vision/likelihood.py b/vision/google/cloud/vision/likelihood.py index fd249e41dff1..a83b2991f2bb 100644 --- a/vision/google/cloud/vision/likelihood.py +++ b/vision/google/cloud/vision/likelihood.py @@ -17,7 +17,7 @@ from enum import Enum -from google.cloud.grpc.vision.v1 import image_annotator_pb2 +from google.cloud.proto.vision.v1 import image_annotator_pb2 def _get_pb_likelihood(likelihood): diff --git a/vision/setup.py b/vision/setup.py index 7915c92850ad..040ee1adbd41 100644 --- a/vision/setup.py +++ b/vision/setup.py @@ -51,13 +51,13 @@ REQUIREMENTS = [ 'enum34', - 'google-cloud-core >= 0.22.1, < 0.23dev', - 'gapic-google-cloud-vision-v1 >= 0.14.0, < 0.15dev', + 'google-cloud-core >= 0.22.1, < 0.24dev', + 'gapic-google-cloud-vision-v1 >= 0.15.0, < 0.16dev', ] setup( name='google-cloud-vision', - version='0.22.0', + version='0.23.0', description='Python Client for Google Cloud Vision', long_description=README, namespace_packages=[ diff --git a/vision/tox.ini b/vision/tox.ini index e0d8c2b172de..636ae5c18f82 100644 --- a/vision/tox.ini +++ b/vision/tox.ini @@ -7,6 +7,8 @@ localdeps = pip install --quiet --upgrade {toxinidir}/../core deps = {toxinidir}/../core + {toxinidir}/../_vendor/proto-google-cloud-vision-v1 + {toxinidir}/../_vendor/gapic-google-cloud-vision-v1 mock pytest covercmd = diff --git 
a/vision/unit_tests/_fixtures.py b/vision/unit_tests/_fixtures.py index a008c66274ce..edfb09e1da20 100644 --- a/vision/unit_tests/_fixtures.py +++ b/vision/unit_tests/_fixtures.py @@ -12,6 +12,44 @@ # See the License for the specific language governing permissions and # limitations under the License. + +CROP_HINTS_RESPONSE = { + "responses": [{ + "cropHintsAnnotation": { + "cropHints": [{ + "importanceFraction": 1.22, + "boundingPoly": { + "vertices": [{ + "x": 77 + }, { + "x": 1821 + }, { + "x": 1821, + "y": 1306 + }, { + "x": 77, + "y": 1306 + }] + }, + "confidence": 0.5 + }, { + "importanceFraction": 1.2099999, + "boundingPoly": { + "vertices": [{}, { + "x": 1959 + }, { + "x": 1959, + "y": 1096 + }, { + "y": 1096 + }] + }, + "confidence": 0.29999998 + }] + } + }] +} + IMAGE_PROPERTIES_RESPONSE = { 'responses': [ { diff --git a/vision/unit_tests/test__gax.py b/vision/unit_tests/test__gax.py index 31383936d0df..89bdf5860dd7 100644 --- a/vision/unit_tests/test__gax.py +++ b/vision/unit_tests/test__gax.py @@ -59,6 +59,16 @@ def test_annotation(self): mock_anno.from_pb.assert_called_with('mock response data') gax_api._annotator_client.batch_annotate_images.assert_called() + def test_annotate_no_requests(self): + client = mock.Mock(spec_set=[]) + with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
+ 'ImageAnnotatorClient'): + gax_api = self._make_one(client) + + response = gax_api.annotate() + self.assertEqual(response, []) + gax_api._annotator_client.batch_annotate_images.assert_not_called() + def test_annotate_no_results(self): from google.cloud.vision.feature import Feature from google.cloud.vision.feature import FeatureTypes @@ -87,7 +97,7 @@ def test_annotate_no_results(self): gax_api._annotator_client.batch_annotate_images.assert_called() def test_annotate_multiple_results(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 from google.cloud.vision.annotations import Annotations from google.cloud.vision.feature import Feature from google.cloud.vision.feature import FeatureTypes @@ -119,6 +129,47 @@ def test_annotate_multiple_results(self): self.assertIsInstance(responses[1], Annotations) gax_api._annotator_client.batch_annotate_images.assert_called() + def test_annotate_with_pb_requests_results(self): + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.cloud.vision.annotations import Annotations + + client = mock.Mock(spec_set=[]) + + feature_type = image_annotator_pb2.Feature.CROP_HINTS + feature = image_annotator_pb2.Feature(type=feature_type, max_results=2) + + image_content = b'abc 1 2 3' + image = image_annotator_pb2.Image(content=image_content) + + aspect_ratios = [1.3333, 1.7777] + crop_hints_params = image_annotator_pb2.CropHintsParams( + aspect_ratios=aspect_ratios) + image_context = image_annotator_pb2.ImageContext( + crop_hints_params=crop_hints_params) + request = image_annotator_pb2.AnnotateImageRequest( + image=image, features=[feature], image_context=image_context) + + with mock.patch('google.cloud.vision._gax.image_annotator_client.' 
+ 'ImageAnnotatorClient'): + gax_api = self._make_one(client) + + responses = [ + image_annotator_pb2.AnnotateImageResponse(), + image_annotator_pb2.AnnotateImageResponse(), + ] + response = image_annotator_pb2.BatchAnnotateImagesResponse( + responses=responses) + + gax_api._annotator_client = mock.Mock( + spec_set=['batch_annotate_images']) + gax_api._annotator_client.batch_annotate_images.return_value = response + responses = gax_api.annotate(requests_pb=[request]) + + self.assertEqual(len(responses), 2) + self.assertIsInstance(responses[0], Annotations) + self.assertIsInstance(responses[1], Annotations) + gax_api._annotator_client.batch_annotate_images.assert_called() + class Test__to_gapic_feature(unittest.TestCase): def _call_fut(self, feature): @@ -128,7 +179,7 @@ def _call_fut(self, feature): def test__to_gapic_feature(self): from google.cloud.vision.feature import Feature from google.cloud.vision.feature import FeatureTypes - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 feature = Feature(FeatureTypes.LABEL_DETECTION, 5) feature_pb = self._call_fut(feature) @@ -144,7 +195,7 @@ def _call_fut(self, image): def test__to_gapic_image_content(self): from google.cloud.vision.image import Image - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 image_content = b'abc 1 2 3' client = object() @@ -153,9 +204,9 @@ def test__to_gapic_image_content(self): self.assertIsInstance(image_pb, image_annotator_pb2.Image) self.assertEqual(image_pb.content, image_content) - def test__to_gapic_image_uri(self): + def test__to_gapic_gcs_image_uri(self): from google.cloud.vision.image import Image - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 image_uri = 'gs://1234/34.jpg' client = object() @@ -164,6 +215,26 @@ def test__to_gapic_image_uri(self): 
self.assertIsInstance(image_pb, image_annotator_pb2.Image) self.assertEqual(image_pb.source.gcs_image_uri, image_uri) + def test__to_gapic_image_uri(self): + from google.cloud.vision.image import Image + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + image_uri = 'http://1234/34.jpg' + client = object() + image = Image(client, source_uri=image_uri) + image_pb = self._call_fut(image) + self.assertIsInstance(image_pb, image_annotator_pb2.Image) + self.assertEqual(image_pb.source.image_uri, image_uri) + + def test__to_gapic_invalid_image_uri(self): + from google.cloud.vision.image import Image + + image_uri = 'ftp://1234/34.jpg' + client = object() + image = Image(client, source_uri=image_uri) + with self.assertRaises(ValueError): + self._call_fut(image) + def test__to_gapic_with_empty_image(self): image = mock.Mock( content=None, source=None, spec=['content', 'source']) diff --git a/vision/unit_tests/test__http.py b/vision/unit_tests/test__http.py index e0eb690352b0..1245181f1ba1 100644 --- a/vision/unit_tests/test__http.py +++ b/vision/unit_tests/test__http.py @@ -49,6 +49,66 @@ def test_call_annotate_with_no_results(self): self.assertEqual(len(response), 0) self.assertIsInstance(response, list) + def test_call_annotate_with_no_parameters(self): + client = mock.Mock(spec_set=['_connection']) + http_api = self._make_one(client) + http_api._connection = mock.Mock(spec_set=['api_request']) + + results = http_api.annotate() + self.assertEqual(results, []) + http_api._connection.api_request.assert_not_called() + + def test_call_annotate_with_pb_requests_results(self): + import json + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + sent = { + 'requests': [ + { + 'image': { + 'content': B64_IMAGE_CONTENT + }, + 'features': [ + { + 'maxResults': 2, + 'type': 'CROP_HINTS' + }, + ], + 'imageContext': { + 'cropHintsParams': { + 'aspectRatios': [1.3333, 1.7777] + }, + }, + }, + ], + } + client = mock.Mock(spec_set=['_connection']) + + 
feature_type = image_annotator_pb2.Feature.CROP_HINTS + feature = image_annotator_pb2.Feature(type=feature_type, max_results=2) + + image = image_annotator_pb2.Image(content=IMAGE_CONTENT) + + aspect_ratios = [1.3333, 1.7777] + crop_hints_params = image_annotator_pb2.CropHintsParams( + aspect_ratios=aspect_ratios) + image_context = image_annotator_pb2.ImageContext( + crop_hints_params=crop_hints_params) + request = image_annotator_pb2.AnnotateImageRequest( + image=image, features=[feature], image_context=image_context) + + http_api = self._make_one(client) + http_api._connection = mock.Mock(spec_set=['api_request']) + http_api._connection.api_request.return_value = {'responses': []} + + responses = http_api.annotate(requests_pb=[request]) + + http_api._connection.api_request.assert_called_with( + data=json.loads(json.dumps(sent)), method='POST', + path='/images:annotate') + self.assertEqual(responses, []) + self.assertEqual(len(responses), 0) + def test_call_annotate_with_more_than_one_result(self): from google.cloud.vision.feature import Feature from google.cloud.vision.feature import FeatureTypes diff --git a/vision/unit_tests/test_annotations.py b/vision/unit_tests/test_annotations.py index 2a5fb5b8ca92..ead18cbb5f17 100644 --- a/vision/unit_tests/test_annotations.py +++ b/vision/unit_tests/test_annotations.py @@ -16,8 +16,8 @@ def _make_pb_entity(): - from google.cloud.grpc.vision.v1 import geometry_pb2 - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import geometry_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 from google.type import latlng_pb2 description = 'testing 1 2 3' @@ -78,7 +78,7 @@ def test_unsupported_http_annotation(self): def test_from_pb(self): from google.cloud.vision.likelihood import Likelihood from google.cloud.vision.safe_search import SafeSearchAnnotation - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import 
image_annotator_pb2 image_response = image_annotator_pb2.AnnotateImageResponse() annotations = self._make_one().from_pb(image_response) @@ -131,7 +131,7 @@ def _call_fut(self, annotations): return _make_faces_from_pb(annotations) def test_it(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 from google.cloud.vision.face import Face faces_pb = [image_annotator_pb2.FaceAnnotation()] @@ -147,7 +147,7 @@ def _call_fut(self, annotations): return _make_image_properties_from_pb(annotations) def test_it(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 from google.protobuf.wrappers_pb2 import FloatValue from google.type.color_pb2 import Color @@ -178,7 +178,7 @@ def _call_fut(self, image): return _process_image_annotations(image) def test_it(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 description = 'testing 1 2 3' locale = 'US' diff --git a/vision/unit_tests/test_client.py b/vision/unit_tests/test_client.py index e1f23f6d4be9..3e403a2e0aa7 100644 --- a/vision/unit_tests/test_client.py +++ b/vision/unit_tests/test_client.py @@ -206,6 +206,27 @@ def test_multiple_detection_from_content(self): self.assertEqual(logo_request['maxResults'], 2) self.assertEqual(logo_request['type'], 'LOGO_DETECTION') + def test_detect_crop_hints_from_source(self): + from google.cloud.vision.crop_hint import CropHint + from unit_tests._fixtures import CROP_HINTS_RESPONSE + returned = CROP_HINTS_RESPONSE + credentials = _make_credentials() + client = self._make_one(project=PROJECT, credentials=credentials, + use_gax=False) + client._connection = _Connection(returned) + image = client.image(source_uri=IMAGE_SOURCE) + crop_hints = image.detect_crop_hints(aspect_ratios=[1.3333], limit=3) + + self.assertEqual(len(crop_hints), 2) + 
self.assertIsInstance(crop_hints[0], CropHint) + image_request = client._connection._requested[0]['data']['requests'][0] + self.assertEqual(image_request['image']['source']['gcsImageUri'], + IMAGE_SOURCE) + self.assertEqual( + image_request['imageContext']['cropHintsParams']['aspectRatios'], + [1.3333]) + self.assertEqual(3, image_request['features'][0]['maxResults']) + def test_face_detection_from_source(self): from google.cloud.vision.face import Face from unit_tests._fixtures import FACE_DETECTION_RESPONSE diff --git a/vision/unit_tests/test_color.py b/vision/unit_tests/test_color.py index 9a9055dac7d2..6421fd5d3423 100644 --- a/vision/unit_tests/test_color.py +++ b/vision/unit_tests/test_color.py @@ -95,7 +95,7 @@ def _get_target_class(): return ImagePropertiesAnnotation def test_image_properties_annotation_from_pb(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 from google.protobuf.wrappers_pb2 import FloatValue from google.type.color_pb2 import Color @@ -121,7 +121,7 @@ def test_image_properties_annotation_from_pb(self): self.assertEqual(image_properties.colors[0].color.alpha, 1.0) def test_empty_image_properties_annotation_from_pb(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 image_properties_pb = image_annotator_pb2.ImageProperties() diff --git a/vision/unit_tests/test_crop_hint.py b/vision/unit_tests/test_crop_hint.py new file mode 100644 index 000000000000..2f617316e220 --- /dev/null +++ b/vision/unit_tests/test_crop_hint.py @@ -0,0 +1,53 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestCropHint(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.vision.crop_hint import CropHint + return CropHint + + def test_crop_hint_annotation(self): + from google.cloud.vision.geometry import Bounds + from unit_tests._fixtures import CROP_HINTS_RESPONSE + + response = CROP_HINTS_RESPONSE['responses'][0]['cropHintsAnnotation'] + crop_hints_dict = response['cropHints'][0] + crop_hints_class = self._get_target_class() + crop_hints = crop_hints_class.from_api_repr(crop_hints_dict) + + self.assertIsInstance(crop_hints.bounds, Bounds) + self.assertEqual(len(crop_hints.bounds.vertices), 4) + self.assertEqual(crop_hints.confidence, 0.5) + self.assertEqual(crop_hints.importance_fraction, 1.22) + + def test_crop_hint_annotation_pb(self): + from google.cloud.proto.vision.v1 import geometry_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 + + vertex = geometry_pb2.Vertex(x=1, y=2) + bounds = geometry_pb2.BoundingPoly(vertices=[vertex]) + crop_hint_pb = image_annotator_pb2.CropHint( + bounding_poly=bounds, confidence=1.23, importance_fraction=4.56) + crop_hints_class = self._get_target_class() + crop_hint = crop_hints_class.from_pb(crop_hint_pb) + + self.assertEqual(len(crop_hint.bounds.vertices), 1) + self.assertEqual(crop_hint.bounds.vertices[0].x_coordinate, 1) + self.assertEqual(crop_hint.bounds.vertices[0].y_coordinate, 2) + self.assertEqual(crop_hint.confidence, 1.23) + self.assertEqual(crop_hint.importance_fraction, 4.56) diff --git a/vision/unit_tests/test_entity.py 
b/vision/unit_tests/test_entity.py index d644de925aab..c9e828ec6f60 100644 --- a/vision/unit_tests/test_entity.py +++ b/vision/unit_tests/test_entity.py @@ -34,7 +34,7 @@ def test_logo_annotation(self): self.assertEqual(162, logo.bounds.vertices[0].y_coordinate) def test_logo_pb_annotation(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 description = 'testing 1 2 3' locale = 'US' diff --git a/vision/unit_tests/test_face.py b/vision/unit_tests/test_face.py index 801479bccc44..55814e0cad15 100644 --- a/vision/unit_tests/test_face.py +++ b/vision/unit_tests/test_face.py @@ -22,7 +22,7 @@ def _get_target_class(): return Face def _make_face_pb(self, *args, **kwargs): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 return image_annotator_pb2.FaceAnnotation(*args, **kwargs) @@ -34,8 +34,8 @@ def setUp(self): self.FACE_ANNOTATIONS['faceAnnotations'][0]) def test_face_from_pb(self): - from google.cloud.grpc.vision.v1 import image_annotator_pb2 - from google.cloud.grpc.vision.v1 import geometry_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import geometry_pb2 position_pb = geometry_pb2.Position(x=1.0, y=2.0, z=3.0) landmark_pb = image_annotator_pb2.FaceAnnotation.Landmark( diff --git a/vision/unit_tests/test_image.py b/vision/unit_tests/test_image.py index f9f00d98b3fd..5b8f3e74e166 100644 --- a/vision/unit_tests/test_image.py +++ b/vision/unit_tests/test_image.py @@ -72,6 +72,24 @@ def test_image_source_type_google_cloud_storage(self): self.assertEqual(None, image.content) self.assertEqual(image.as_dict(), as_dict) + def test_image_source_type_image_url(self): + url = 'http://www.example.com/image.jpg' + image = self._make_one(CLIENT_MOCK, source_uri=url) + as_dict = { + 'source': { + 'image_uri': url, + }, + } + + self.assertEqual(image.source, url) + 
self.assertIsNone(image.content) + self.assertEqual(image.as_dict(), as_dict) + + def test_image_no_valid_image_data(self): + image = self._make_one(CLIENT_MOCK, source_uri='ftp://notsupported') + with self.assertRaises(ValueError): + image.as_dict() + def test_cannot_set_both_source_and_content(self): image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT) diff --git a/vision/unit_tests/test_safe_search.py b/vision/unit_tests/test_safe_search.py index 5bc06ac47c52..b2ee7593e87b 100644 --- a/vision/unit_tests/test_safe_search.py +++ b/vision/unit_tests/test_safe_search.py @@ -38,9 +38,9 @@ def test_safe_search_annotation(self): def test_pb_safe_search_annotation(self): from google.cloud.vision.likelihood import Likelihood - from google.cloud.grpc.vision.v1.image_annotator_pb2 import ( + from google.cloud.proto.vision.v1.image_annotator_pb2 import ( Likelihood as LikelihoodPB) - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 possible = LikelihoodPB.Value('POSSIBLE') possible_name = Likelihood.POSSIBLE @@ -57,7 +57,7 @@ def test_pb_safe_search_annotation(self): def test_empty_pb_safe_search_annotation(self): from google.cloud.vision.likelihood import Likelihood - from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.proto.vision.v1 import image_annotator_pb2 unknown = Likelihood.UNKNOWN safe_search_annotation = image_annotator_pb2.SafeSearchAnnotation()