From ad5d131b09300884f823ea4d3241c825d2042be1 Mon Sep 17 00:00:00 2001
From: Bu Sun Kim
Date: Fri, 25 Jan 2019 09:04:06 -0800
Subject: [PATCH] Revert formatting change

---
 docs/conf.py                             | 179 +++----
 src/google/__init__.py                   |   4 +-
 src/google/cloud/__init__.py             |   4 +-
 src/google/cloud/happybase/batch.py      |  73 ++-
 src/google/cloud/happybase/connection.py | 119 ++---
 src/google/cloud/happybase/pool.py       |  18 +-
 src/google/cloud/happybase/table.py      | 206 ++++----
 system_tests/attempt_system_tests.py     |  61 +--
 system_tests/happybase.py                | 412 ++++++++-------
 system_tests/retry.py                    |  66 +--
 system_tests/run_system_test.py          |  30 +-
 system_tests/system_test_utils.py        |  16 +-
 unit_tests/test_batch.py                 | 131 +++--
 unit_tests/test_connection.py            | 213 ++++----
 unit_tests/test_pool.py                  |  41 +-
 unit_tests/test_table.py                 | 646 ++++++++++++-----------
 16 files changed, 1084 insertions(+), 1135 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index eebce7b..3dec28c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -32,86 +32,86 @@
 import sphinx_rtd_theme
 
-ON_READ_THE_DOCS = os.environ.get("READTHEDOCS", None) == "True"
+ON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath(".."))
+sys.path.insert(0, os.path.abspath('..'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
+#needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
-    "sphinx.ext.autodoc",
-    "sphinx.ext.autosummary",
-    "sphinx.ext.doctest",
-    "sphinx.ext.intersphinx",
-    "sphinx.ext.todo",
-    "sphinx.ext.viewcode",
+    'sphinx.ext.autodoc',
+    'sphinx.ext.autosummary',
+    'sphinx.ext.doctest',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.todo',
+    'sphinx.ext.viewcode',
 ]
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = []
 
 # The suffix of source filenames.
-source_suffix = ".rst"
+source_suffix = '.rst'
 
 # The encoding of source files.
-# source_encoding = 'utf-8-sig'
+#source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = "index"
+master_doc = 'index'
 
 # General information about the project.
-project = u"google-cloud-happybase"
-copyright = u"2014, Google"
+project = u'google-cloud-happybase'
+copyright = u'2014, Google'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-distro = get_distribution("google-cloud-happybase")
-release = os.getenv("SPHINX_RELEASE", distro.version)
+distro = get_distribution('google-cloud-happybase')
+release = os.getenv('SPHINX_RELEASE', distro.version)
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-# language = None
+#language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-# today = ''
+#today = ''
 # Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
+#today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ["_build"]
+exclude_patterns = ['_build']
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-# default_role = None
+#default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
+#add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-# add_module_names = True
+#add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-# show_authors = False
+#show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = "sphinx"
+pygments_style = 'sphinx'
 
 # A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
+#modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
@@ -120,83 +120,83 @@
 # a list of builtin themes.
 
 if not ON_READ_THE_DOCS:
-    html_theme = "sphinx_rtd_theme"
+    html_theme = 'sphinx_rtd_theme'
     html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
-# html_theme_options = {}
+#html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
+#html_theme_path = []
 
 # The name for this set of Sphinx documents. If None, it defaults to
 # "<project> v<release> documentation".
-# html_title = None
+#html_title = None
 
 # A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
+#html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-# html_logo = None
+#html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = "_static/images/favicon.ico"
+html_favicon = '_static/images/favicon.ico'
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = ['_static']
 
-html_add_permalinks = "#"
+html_add_permalinks = '#'
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
+#html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-# html_use_smartypants = True
+#html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
+#html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-# html_additional_pages = {}
+#html_additional_pages = {}
 
 # If false, no module index is generated.
-# html_domain_indices = True
+#html_domain_indices = True
 
 # If false, no index is generated.
-# html_use_index = True
+#html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-# html_split_index = False
+#html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
+#html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
+#html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
+#html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it. The value of this option must be the
 # base URL from which the finished HTML is served.
-# html_use_opensearch = ''
+#html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
+#html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = "googlecloudhappybasedoc"
+htmlhelp_basename = 'googlecloudhappybasedoc'
 
 html_context = {}
 
@@ -204,47 +204,45 @@
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    #'papersize': 'letterpaper',
-    # The font size ('10pt', '11pt' or '12pt').
-    #'pointsize': '10pt',
-    # Additional stuff for the LaTeX preamble.
-    #'preamble': '',
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
 }
 
 metadata = distro.get_metadata(distro.PKG_INFO)
-author = message_from_string(metadata).get("Author")
+author = message_from_string(metadata).get('Author')
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-    (
-        "index",
-        "google-cloud-happybase.tex",
-        u"google-cloud-happybase Documentation",
-        author,
-        "manual",
-    )
+    ('index', 'google-cloud-happybase.tex',
+     u'google-cloud-happybase Documentation',
+     author, 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-# latex_logo = None
+#latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-# latex_use_parts = False
+#latex_use_parts = False
 
 # If true, show page references after internal links.
-# latex_show_pagerefs = False
+#latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-# latex_show_urls = False
+#latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-# latex_appendices = []
+#latex_appendices = []
 
 # If false, no module index is generated.
-# latex_domain_indices = True
+#latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    (
-        "index",
-        "google-cloud-happybase",
-        u"google-cloud-happybase Documentation",
-        [author],
-        1,
-    )
+    ('index', 'google-cloud-happybase',
+     u'google-cloud-happybase Documentation',
+     [author], 1)
 ]
 
 # If true, show URL addresses after external links.
-# man_show_urls = False
+#man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
 
 # Grouping the document tree into Texinfo files. List of tuples
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (
-        "index",
-        "google-cloud-happybase",
-        u"google-cloud-happybase Documentation",
-        author,
-        "google-cloud-happybase",
-        "Python API for Google Cloud HappyBase.",
-        "Miscellaneous",
-    )
+    ('index', 'google-cloud-happybase',
+     u'google-cloud-happybase Documentation',
+     author, 'google-cloud-happybase', 'Python API for Google Cloud HappyBase.',
+     'Miscellaneous'),
 ]
 
 # Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
+#texinfo_appendices = []
 
 # If false, no module index is generated.
-# texinfo_domain_indices = True
+#texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
+#texinfo_show_urls = 'footnote'
 
 # This pulls class descriptions from the class docstring,
 # and parameter definitions from the __init__ docstring.
-autoclass_content = "both"
+autoclass_content = 'both'
 
 # Configuration for intersphinx:
 # Refer to the Python standard library and the oauth2client and
 # httplib2 libraries.
 intersphinx_mapping = {
-    "httplib2": ("http://httplib2.readthedocs.io/en/latest/", None),
-    "google-cloud-python": (
-        "https://google-cloud-python.readthedocs.io/en/latest/",
-        None,
-    ),
-    "oauth2client": ("http://oauth2client.readthedocs.io/en/latest", None),
-    "python": ("https://docs.python.org/2", None),
+    'httplib2': ('http://httplib2.readthedocs.io/en/latest/', None),
+    'google-cloud-python': (
+        'https://google-cloud-python.readthedocs.io/en/latest/', None),
+    'oauth2client': ('http://oauth2client.readthedocs.io/en/latest', None),
+    'python': ('https://docs.python.org/2', None),
 }
diff --git a/src/google/__init__.py b/src/google/__init__.py
index 102caa9..1fbf189 100644
--- a/src/google/__init__.py
+++ b/src/google/__init__.py
@@ -13,6 +13,6 @@
 # limitations under the License.
 
 try:
-    __import__("pkg_resources").declare_namespace(__name__)
+    __import__('pkg_resources').declare_namespace(__name__)
 except ImportError:
-    __path__ = __import__("pkgutil").extend_path(__path__, __name__)
+    __path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/src/google/cloud/__init__.py b/src/google/cloud/__init__.py
index 102caa9..1fbf189 100644
--- a/src/google/cloud/__init__.py
+++ b/src/google/cloud/__init__.py
@@ -13,6 +13,6 @@
 # limitations under the License.
 
 try:
-    __import__("pkg_resources").declare_namespace(__name__)
+    __import__('pkg_resources').declare_namespace(__name__)
 except ImportError:
-    __path__ = __import__("pkgutil").extend_path(__path__, __name__)
+    __path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/src/google/cloud/happybase/batch.py b/src/google/cloud/happybase/batch.py
index e34a4ed..2b9bf1f 100644
--- a/src/google/cloud/happybase/batch.py
+++ b/src/google/cloud/happybase/batch.py
@@ -27,9 +27,8 @@
 _WAL_SENTINEL = object()
 # Assumed granularity of timestamps in Cloud Bigtable.
 _ONE_MILLISECOND = datetime.timedelta(microseconds=1000)
-_WAL_WARNING = (
-    "The wal argument (Write-Ahead-Log) is not " "supported by Cloud Bigtable."
-)
+_WAL_WARNING = ('The wal argument (Write-Ahead-Log) is not '
+                'supported by Cloud Bigtable.')
 
 
 class Batch(object):
@@ -73,24 +72,17 @@ class Batch(object):
              is not positive.
""" - def __init__( - self, - table, - timestamp=None, - batch_size=None, - transaction=False, - wal=_WAL_SENTINEL, - ): + def __init__(self, table, timestamp=None, batch_size=None, + transaction=False, wal=_WAL_SENTINEL): if wal is not _WAL_SENTINEL: warnings.warn(_WAL_WARNING) if batch_size is not None: if transaction: - raise TypeError( - "When batch_size is set, a Batch cannot be " "transactional" - ) + raise TypeError('When batch_size is set, a Batch cannot be ' + 'transactional') if batch_size <= 0: - raise ValueError("batch_size must be positive") + raise ValueError('batch_size must be positive') self._table = table self._batch_size = batch_size @@ -167,13 +159,14 @@ def put(self, row, data, wal=_WAL_SENTINEL): row_object = self._get_row(row) # Make sure all the keys are valid before beginning # to add mutations. - column_pairs = _get_column_pairs(six.iterkeys(data), require_qualifier=True) + column_pairs = _get_column_pairs(six.iterkeys(data), + require_qualifier=True) for column_family_id, column_qualifier in column_pairs: - value = data[(column_family_id + ":" + column_qualifier).encode("utf-8")] - row_object.set_cell( - column_family_id, column_qualifier, value, timestamp=self._timestamp - ) + value = data[(column_family_id + ':' + + column_qualifier).encode('utf-8')] + row_object.set_cell(column_family_id, column_qualifier, + value, timestamp=self._timestamp) self._mutation_count += len(data) self._try_send() @@ -199,18 +192,15 @@ def _delete_columns(self, columns, row_object): for column_family_id, column_qualifier in column_pairs: if column_qualifier is None: if self._delete_range is not None: - raise ValueError( - "The Cloud Bigtable API does not support " - "adding a timestamp to " - '"DeleteFromFamily" ' - ) - row_object.delete_cells( - column_family_id, columns=row_object.ALL_COLUMNS - ) + raise ValueError('The Cloud Bigtable API does not support ' + 'adding a timestamp to ' + '"DeleteFromFamily" ') + row_object.delete_cells(column_family_id, + columns=row_object.ALL_COLUMNS) else: - row_object.delete_cell( - column_family_id, column_qualifier, time_range=self._delete_range - ) + row_object.delete_cell(column_family_id, + column_qualifier, + time_range=self._delete_range) def delete(self, row, columns=None, wal=_WAL_SENTINEL): """Delete data from a row in the table owned by this batch. @@ -244,11 +234,9 @@ def delete(self, row, columns=None, wal=_WAL_SENTINEL): if columns is None: # Delete entire row. if self._delete_range is not None: - raise ValueError( - "The Cloud Bigtable API does not support " - 'adding a timestamp to "DeleteFromRow" ' - "mutations" - ) + raise ValueError('The Cloud Bigtable API does not support ' + 'adding a timestamp to "DeleteFromRow" ' + 'mutations') row_object.delete() self._mutation_count += 1 else: @@ -318,20 +306,21 @@ def _get_column_pairs(columns, require_qualifier=False): column_pairs = [] for column in columns: if isinstance(column, six.binary_type): - column = column.decode("utf-8") + column = column.decode('utf-8') # Remove trailing colons (i.e. for standalone column family). - if column.endswith(u":"): + if column.endswith(u':'): column = column[:-1] - num_colons = column.count(u":") + num_colons = column.count(u':') if num_colons == 0: # column is a column family. 
if require_qualifier: - raise ValueError("column does not contain a qualifier", column) + raise ValueError('column does not contain a qualifier', + column) else: column_pairs.append([column, None]) elif num_colons == 1: - column_pairs.append(column.split(u":")) + column_pairs.append(column.split(u':')) else: - raise ValueError("Column contains the : separator more than once") + raise ValueError('Column contains the : separator more than once') return column_pairs diff --git a/src/google/cloud/happybase/connection.py b/src/google/cloud/happybase/connection.py index 9fd5630..4a0e77b 100644 --- a/src/google/cloud/happybase/connection.py +++ b/src/google/cloud/happybase/connection.py @@ -47,19 +47,17 @@ DEFAULT_COMPAT = None DEFAULT_PROTOCOL = None -_LEGACY_ARGS = frozenset(("host", "port", "compat", "transport", "protocol")) -_BASE_DISABLE = "Cloud Bigtable has no concept of enabled / disabled tables." -_DISABLE_DELETE_MSG = ( - "The disable argument should not be used in " "delete_table(). " -) + _BASE_DISABLE -_ENABLE_TMPL = "Connection.enable_table(%r) was called, but " + _BASE_DISABLE -_DISABLE_TMPL = "Connection.disable_table(%r) was called, but " + _BASE_DISABLE -_IS_ENABLED_TMPL = "Connection.is_table_enabled(%r) was called, but " + _BASE_DISABLE -_COMPACT_TMPL = ( - "Connection.compact_table(%r, major=%r) was called, but the " - "Cloud Bigtable API handles table compactions automatically " - "and does not expose an API for it." -) +_LEGACY_ARGS = frozenset(('host', 'port', 'compat', 'transport', 'protocol')) +_BASE_DISABLE = 'Cloud Bigtable has no concept of enabled / disabled tables.' +_DISABLE_DELETE_MSG = ('The disable argument should not be used in ' + 'delete_table(). ') + _BASE_DISABLE +_ENABLE_TMPL = 'Connection.enable_table(%r) was called, but ' + _BASE_DISABLE +_DISABLE_TMPL = 'Connection.disable_table(%r) was called, but ' + _BASE_DISABLE +_IS_ENABLED_TMPL = ('Connection.is_table_enabled(%r) was called, but ' + + _BASE_DISABLE) +_COMPACT_TMPL = ('Connection.compact_table(%r, major=%r) was called, but the ' + 'Cloud Bigtable API handles table compactions automatically ' + 'and does not expose an API for it.') def _get_instance(): @@ -79,23 +77,20 @@ def _get_instance(): :raises ValueError: if there is a failed location or any number of instances other than one. """ - client_kwargs = {"admin": True} + client_kwargs = {'admin': True} client = Client(**client_kwargs) instances, failed_locations = client.list_instances() if failed_locations: - raise ValueError( - "Determining instance via ListInstances encountered " "failed locations." - ) + raise ValueError('Determining instance via ListInstances encountered ' + 'failed locations.') num_instances = len(instances) if num_instances == 0: - raise ValueError("This client doesn't have access to any instances.") + raise ValueError('This client doesn\'t have access to any instances.') if num_instances > 1: - raise ValueError( - "This client has access to more than one instance. " - "Please directly pass the instance you'd " - "like to use." - ) + raise ValueError('This client has access to more than one instance. 
' + 'Please directly pass the instance you\'d ' + 'like to use.') return instances[0] @@ -137,31 +132,18 @@ class Connection(object): _instance = None - def __init__( - self, - autoconnect=True, - table_prefix=None, - table_prefix_separator="_", - instance=None, - **kwargs - ): + def __init__(self, autoconnect=True, table_prefix=None, + table_prefix_separator='_', instance=None, **kwargs): self._handle_legacy_args(kwargs) if table_prefix is not None: if not isinstance(table_prefix, six.string_types): - raise TypeError( - "table_prefix must be a string", - "received", - table_prefix, - type(table_prefix), - ) + raise TypeError('table_prefix must be a string', 'received', + table_prefix, type(table_prefix)) if not isinstance(table_prefix_separator, six.string_types): - raise TypeError( - "table_prefix_separator must be a string", - "received", - table_prefix_separator, - type(table_prefix_separator), - ) + raise TypeError('table_prefix_separator must be a string', + 'received', table_prefix_separator, + type(table_prefix_separator)) self.table_prefix = table_prefix self.table_prefix_separator = table_prefix_separator @@ -187,17 +169,15 @@ def _handle_legacy_args(arguments_dict): """ common_args = _LEGACY_ARGS.intersection(six.iterkeys(arguments_dict)) if common_args: - all_args = ", ".join(common_args) - message = ( - "The HappyBase legacy arguments %s were used. These " - "arguments are unused by google-cloud." % (all_args,) - ) + all_args = ', '.join(common_args) + message = ('The HappyBase legacy arguments %s were used. These ' + 'arguments are unused by google-cloud.' % (all_args,)) warnings.warn(message) for arg_name in common_args: arguments_dict.pop(arg_name) if arguments_dict: unexpected_names = arguments_dict.keys() - raise TypeError("Received unexpected arguments", unexpected_names) + raise TypeError('Received unexpected arguments', unexpected_names) def open(self): """Open the underlying transport to Cloud Bigtable. @@ -259,17 +239,15 @@ def tables(self): :returns: List of string table names. """ low_level_table_instances = self._instance.list_tables() - table_names = [ - table_instance.table_id for table_instance in low_level_table_instances - ] + table_names = [table_instance.table_id + for table_instance in low_level_table_instances] # Filter using prefix, and strip prefix from names if self.table_prefix is not None: - prefix = self._table_name("") + prefix = self._table_name('') offset = len(prefix) - table_names = [ - name[offset:] for name in table_names if name.startswith(prefix) - ] + table_names = [name[offset:] for name in table_names + if name.startswith(prefix)] return table_names @@ -312,19 +290,18 @@ def create_table(self, name, families): table exists. """ if not isinstance(families, dict): - raise TypeError("families arg must be a dictionary") + raise TypeError('families arg must be a dictionary') if not families: - raise ValueError( - "Cannot create table %r (no column " "families specified)" % (name,) - ) + raise ValueError('Cannot create table %r (no column ' + 'families specified)' % (name,)) # Parse all keys before making any API requests. 
gc_rule_dict = {} for column_family_name, option in families.items(): if isinstance(column_family_name, six.binary_type): - column_family_name = column_family_name.decode("utf-8") - if column_family_name.endswith(":"): + column_family_name = column_family_name.decode('utf-8') + if column_family_name.endswith(':'): column_family_name = column_family_name[:-1] gc_rule_dict[column_family_name] = _parse_family_option(option) @@ -445,19 +422,17 @@ def _parse_family_option(option): """ result = option if isinstance(result, dict): - if not set(result.keys()) <= set(["max_versions", "time_to_live"]): - all_keys = ", ".join(repr(key) for key in result.keys()) - warning_msg = ( - "Cloud Bigtable only supports max_versions and " - "time_to_live column family settings. " - "Received: %s" % (all_keys,) - ) + if not set(result.keys()) <= set(['max_versions', 'time_to_live']): + all_keys = ', '.join(repr(key) for key in result.keys()) + warning_msg = ('Cloud Bigtable only supports max_versions and ' + 'time_to_live column family settings. ' + 'Received: %s' % (all_keys,)) warnings.warn(warning_msg) - max_num_versions = result.get("max_versions") + max_num_versions = result.get('max_versions') max_age = None - if "time_to_live" in result: - max_age = datetime.timedelta(seconds=result["time_to_live"]) + if 'time_to_live' in result: + max_age = datetime.timedelta(seconds=result['time_to_live']) versions_rule = age_rule = None if max_num_versions is not None: diff --git a/src/google/cloud/happybase/pool.py b/src/google/cloud/happybase/pool.py index c24bd97..552dc5e 100644 --- a/src/google/cloud/happybase/pool.py +++ b/src/google/cloud/happybase/pool.py @@ -62,22 +62,21 @@ class ConnectionPool(object): :class:`ValueError ` if ``size`` is not positive. """ - def __init__(self, size, **kwargs): if not isinstance(size, six.integer_types): - raise TypeError("Pool size arg must be an integer") + raise TypeError('Pool size arg must be an integer') if size < _MIN_POOL_SIZE: - raise ValueError("Pool size must be positive") + raise ValueError('Pool size must be positive') self._lock = threading.Lock() self._queue = six.moves.queue.LifoQueue(maxsize=size) self._thread_connections = threading.local() connection_kwargs = kwargs - connection_kwargs["autoconnect"] = False - if "instance" not in connection_kwargs: - connection_kwargs["instance"] = _get_instance() + connection_kwargs['autoconnect'] = False + if 'instance' not in connection_kwargs: + connection_kwargs['instance'] = _get_instance() for _ in six.moves.range(size): connection = Connection(**connection_kwargs) @@ -98,9 +97,8 @@ def _acquire_connection(self, timeout=None): try: return self._queue.get(block=True, timeout=timeout) except six.moves.queue.Empty: - raise NoConnectionsAvailable( - "No connection available from pool " "within specified timeout" - ) + raise NoConnectionsAvailable('No connection available from pool ' + 'within specified timeout') @contextlib.contextmanager def connection(self, timeout=None): @@ -127,7 +125,7 @@ def connection(self, timeout=None): retrieved from the pool before the ``timeout`` (only if a timeout is specified). 
""" - connection = getattr(self._thread_connections, "current", None) + connection = getattr(self._thread_connections, 'current', None) retrieved_new_cnxn = False if connection is None: diff --git a/src/google/cloud/happybase/table.py b/src/google/cloud/happybase/table.py index aa59b9d..5b78cbe 100644 --- a/src/google/cloud/happybase/table.py +++ b/src/google/cloud/happybase/table.py @@ -41,8 +41,8 @@ from google.cloud.happybase.batch import Batch -_PACK_I64 = struct.Struct(">q").pack -_UNPACK_I64 = struct.Struct(">q").unpack +_PACK_I64 = struct.Struct('>q').pack +_UNPACK_I64 = struct.Struct('>q').unpack _SIMPLE_GC_RULES = (MaxAgeGCRule, MaxVersionsGCRule) @@ -65,14 +65,10 @@ def make_row(cell_map, include_timestamp): :raises: :class:`NotImplementedError ` always """ - raise NotImplementedError( - "The Cloud Bigtable API output is not the same " - "as the output from the Thrift server, so this " - "helper can not be implemented.", - "Called with", - cell_map, - include_timestamp, - ) + raise NotImplementedError('The Cloud Bigtable API output is not the same ' + 'as the output from the Thrift server, so this ' + 'helper can not be implemented.', 'Called with', + cell_map, include_timestamp) def make_ordered_row(sorted_columns, include_timestamp): @@ -93,14 +89,10 @@ def make_ordered_row(sorted_columns, include_timestamp): :raises: :class:`NotImplementedError ` always """ - raise NotImplementedError( - "The Cloud Bigtable API output is not the same " - "as the output from the Thrift server, so this " - "helper can not be implemented.", - "Called with", - sorted_columns, - include_timestamp, - ) + raise NotImplementedError('The Cloud Bigtable API output is not the same ' + 'as the output from the Thrift server, so this ' + 'helper can not be implemented.', 'Called with', + sorted_columns, include_timestamp) class Table(object): @@ -122,10 +114,11 @@ def __init__(self, name, connection): self.connection = connection self._low_level_table = None if self.connection is not None: - self._low_level_table = _LowLevelTable(self.name, self.connection._instance) + self._low_level_table = _LowLevelTable(self.name, + self.connection._instance) def __repr__(self): - return "" % (self.name,) + return '' % (self.name,) def families(self): """Retrieve the column families for this table. @@ -152,10 +145,8 @@ def regions(self): :raises: :class:`NotImplementedError ` always """ - raise NotImplementedError( - "The Cloud Bigtable API does not have a " - "concept of splitting a table into regions." - ) + raise NotImplementedError('The Cloud Bigtable API does not have a ' + 'concept of splitting a table into regions.') def row(self, row, columns=None, timestamp=None, include_timestamp=False): """Retrieve a single row of data. @@ -191,17 +182,19 @@ def row(self, row, columns=None, timestamp=None, include_timestamp=False): if columns is not None: filters.append(_columns_filter_helper(columns)) # versions == 1 since we only want the latest. 
- filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, filters=filters) + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, + filters=filters) - partial_row_data = self._low_level_table.read_row(row, filter_=filter_) + partial_row_data = self._low_level_table.read_row( + row, filter_=filter_) if partial_row_data is None: return {} - return _partial_row_to_dict( - partial_row_data, include_timestamp=include_timestamp - ) + return _partial_row_to_dict(partial_row_data, + include_timestamp=include_timestamp) - def rows(self, rows, columns=None, timestamp=None, include_timestamp=False): + def rows(self, rows, columns=None, timestamp=None, + include_timestamp=False): """Retrieve multiple rows of data. All optional arguments behave the same in this method as they do in @@ -240,10 +233,11 @@ def rows(self, rows, columns=None, timestamp=None, include_timestamp=False): row_set = _get_row_set_from_rows(rows) # versions == 1 since we only want the latest. - filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, filters=filters) + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, + filters=filters) rows_generator = self._low_level_table.read_rows( - row_set=row_set, filter_=filter_ - ) + row_set=row_set, + filter_=filter_) # NOTE: We could use max_loops = 1000 or some similar value to ensure # that the stream isn't open too long. @@ -251,14 +245,12 @@ def rows(self, rows, columns=None, timestamp=None, include_timestamp=False): for rowdata in rows_generator: curr_row_data = rowdata curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp - ) + curr_row_data, include_timestamp=include_timestamp) result.append((curr_row_data.row_key, curr_row_dict)) return result - def cells( - self, row, column, versions=None, timestamp=None, include_timestamp=False - ): + def cells(self, row, column, versions=None, timestamp=None, + include_timestamp=False): """Retrieve multiple versions of a single cell from the table. :type row: str @@ -285,9 +277,8 @@ def cells( :returns: List of values in the cell (with timestamps if ``include_timestamp`` is :data:`True`). """ - filter_ = _filter_chain_helper( - column=column, versions=versions, timestamp=timestamp - ) + filter_ = _filter_chain_helper(column=column, versions=versions, + timestamp=timestamp) partial_row_data = self._low_level_table.read_row(row, filter_=filter_) if partial_row_data is None: return [] @@ -295,25 +286,18 @@ def cells( cells = partial_row_data._cells # We know that `_filter_chain_helper` has already verified that # column will split as such. - column = column.decode("utf-8") - column_family_id, column_qualifier = column.split(":") + column = column.decode('utf-8') + column_family_id, column_qualifier = column.split(':') # NOTE: We expect the only key in `cells` is `column_family_id` # and the only key `cells[column_family_id]` is # `column_qualifier`. But we don't check that this is true. 
- curr_cells = cells[column_family_id][column_qualifier.encode("utf-8")] - return _cells_to_pairs(curr_cells, include_timestamp=include_timestamp) - - def scan( - self, - row_start=None, - row_stop=None, - row_prefix=None, - columns=None, - timestamp=None, - include_timestamp=False, - limit=None, - **kwargs - ): + curr_cells = cells[column_family_id][column_qualifier.encode('utf-8')] + return _cells_to_pairs( + curr_cells, include_timestamp=include_timestamp) + + def scan(self, row_start=None, row_stop=None, row_prefix=None, + columns=None, timestamp=None, + include_timestamp=False, limit=None, **kwargs): """Create a scanner for data in this table. This method returns a generator that can be used for looping over the @@ -398,21 +382,18 @@ def scan( ``filter`` is used. """ row_start, row_stop, filter_chain = _scan_filter_helper( - row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs - ) + row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs) row_set = _get_row_set_object(row_start, row_stop) rows_generator = self._low_level_table.read_rows( - row_set=row_set, limit=limit, filter_=filter_chain - ) + row_set=row_set, limit=limit, filter_=filter_chain) for rowdata in rows_generator: curr_row_data = rowdata # NOTE: We expect len(rows_dict) == 0, but don't check it. curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp - ) + curr_row_data, include_timestamp=include_timestamp) yield (curr_row_data.row_key, curr_row_dict) def put(self, row, data, timestamp=None, wal=_WAL_SENTINEL): @@ -482,9 +463,8 @@ def delete(self, row, columns=None, timestamp=None, wal=_WAL_SENTINEL): with self.batch(timestamp=timestamp, wal=wal) as batch: batch.delete(row, columns) - def batch( - self, timestamp=None, batch_size=None, transaction=False, wal=_WAL_SENTINEL - ): + def batch(self, timestamp=None, batch_size=None, transaction=False, + wal=_WAL_SENTINEL): """Create a new batch operation for this table. This method returns a new @@ -517,13 +497,8 @@ def batch( :rtype: :class:`~google.cloud.bigtable.happybase.batch.Batch` :returns: A batch bound to this table. """ - return Batch( - self, - timestamp=timestamp, - batch_size=batch_size, - transaction=transaction, - wal=wal, - ) + return Batch(self, timestamp=timestamp, batch_size=batch_size, + transaction=transaction, wal=wal) def counter_get(self, row, column): """Retrieve the current value of a counter column. @@ -594,8 +569,8 @@ def counter_inc(self, row, column, value=1): """ row = self._low_level_table.row(row, append=True) if isinstance(column, six.binary_type): - column = column.decode("utf-8") - column_family_id, column_qualifier = column.split(":") + column = column.decode('utf-8') + column_family_id, column_qualifier = column.split(':') row.increment_cell_value(column_family_id, column_qualifier, value) # See AppendRow.commit() will return a dictionary: # { @@ -610,11 +585,10 @@ def counter_inc(self, row, column, value=1): modified_cells = row.commit() # Get the cells in the modified column, column_cells = modified_cells[column_family_id][ - column_qualifier.encode("utf-8") - ] + column_qualifier.encode('utf-8')] # Make sure there is exactly one cell in the column. if len(column_cells) != 1: - raise ValueError("Expected server to return one modified cell.") + raise ValueError('Expected server to return one modified cell.') column_cell = column_cells[0] # Get the bytes value from the column and convert it to an integer. 
bytes_value = column_cell[0] @@ -677,15 +651,14 @@ def _gc_rule_to_dict(gc_rule): if gc_rule is None: result = {} elif isinstance(gc_rule, MaxAgeGCRule): - result = {"time_to_live": gc_rule.max_age.total_seconds()} + result = {'time_to_live': gc_rule.max_age.total_seconds()} elif isinstance(gc_rule, MaxVersionsGCRule): - result = {"max_versions": gc_rule.max_num_versions} + result = {'max_versions': gc_rule.max_num_versions} elif isinstance(gc_rule, GCRuleIntersection): if len(gc_rule.rules) == 2: rule1, rule2 = gc_rule.rules - if isinstance(rule1, _SIMPLE_GC_RULES) and isinstance( - rule2, _SIMPLE_GC_RULES - ): + if (isinstance(rule1, _SIMPLE_GC_RULES) and + isinstance(rule2, _SIMPLE_GC_RULES)): rule1 = _gc_rule_to_dict(rule1) rule2 = _gc_rule_to_dict(rule2) key1, = rule1.keys() @@ -709,7 +682,7 @@ def _next_char(str_val, index): in ``str_val``. """ ord_val = six.indexbytes(str_val, index) - return _to_bytes(chr(ord_val + 1), encoding="latin-1") + return _to_bytes(chr(ord_val + 1), encoding='latin-1') def _string_successor(str_val): @@ -730,18 +703,18 @@ def _string_successor(str_val): :rtype: str :returns: The next string in lexical order after ``str_val``. """ - str_val = _to_bytes(str_val, encoding="latin-1") - if str_val == b"": + str_val = _to_bytes(str_val, encoding='latin-1') + if str_val == b'': return str_val index = len(str_val) - 1 while index >= 0: - if six.indexbytes(str_val, index) != 0xFF: + if six.indexbytes(str_val, index) != 0xff: break index -= 1 if index == -1: - return b"" + return b'' return str_val[:index] + _next_char(str_val, index) @@ -843,14 +816,16 @@ def _partial_row_to_dict(partial_row_data, include_timestamp=False): """ result = {} for column, cells in six.iteritems(partial_row_data.to_dict()): - cell_vals = _cells_to_pairs(cells, include_timestamp=include_timestamp) + cell_vals = _cells_to_pairs(cells, + include_timestamp=include_timestamp) # NOTE: We assume there is exactly 1 version since we used that in # our filter, but we don't check this. result[column] = cell_vals[0] return result -def _filter_chain_helper(column=None, versions=None, timestamp=None, filters=None): +def _filter_chain_helper(column=None, versions=None, timestamp=None, + filters=None): """Create filter chain to limit a results set. 
:type column: str @@ -879,8 +854,8 @@ def _filter_chain_helper(column=None, versions=None, timestamp=None, filters=Non if column is not None: if isinstance(column, six.binary_type): - column = column.decode("utf-8") - column_family_id, column_qualifier = column.split(":") + column = column.decode('utf-8') + column_family_id, column_qualifier = column.split(':') fam_filter = FamilyNameRegexFilter(column_family_id) qual_filter = ColumnQualifierRegexFilter(column_qualifier) filters.extend([fam_filter, qual_filter]) @@ -892,50 +867,44 @@ def _filter_chain_helper(column=None, versions=None, timestamp=None, filters=Non num_filters = len(filters) if num_filters == 0: - raise ValueError("Must have at least one filter.") + raise ValueError('Must have at least one filter.') elif num_filters == 1: return filters[0] else: return RowFilterChain(filters=filters) -def _scan_filter_helper( - row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs -): +def _scan_filter_helper(row_start, row_stop, row_prefix, columns, + timestamp, limit, kwargs): """Helper for :meth:`scan`: build up a filter chain.""" - filter_ = kwargs.pop("filter", None) + filter_ = kwargs.pop('filter', None) legacy_args = [] - for kw_name in ("batch_size", "scan_batching", "sorted_columns"): + for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): if kw_name in kwargs: legacy_args.append(kw_name) kwargs.pop(kw_name) if legacy_args: - legacy_args = ", ".join(legacy_args) - message = ( - "The HappyBase legacy arguments %s were used. These " - "arguments are unused by google-cloud." % (legacy_args,) - ) + legacy_args = ', '.join(legacy_args) + message = ('The HappyBase legacy arguments %s were used. These ' + 'arguments are unused by google-cloud.' % (legacy_args,)) warnings.warn(message) if kwargs: - raise TypeError("Received unexpected arguments", kwargs.keys()) + raise TypeError('Received unexpected arguments', kwargs.keys()) if limit is not None and limit < 1: - raise ValueError("limit must be positive") + raise ValueError('limit must be positive') if row_prefix is not None: if row_start is not None or row_stop is not None: - raise ValueError( - "row_prefix cannot be combined with " "row_start or row_stop" - ) + raise ValueError('row_prefix cannot be combined with ' + 'row_start or row_stop') row_start = row_prefix row_stop = _string_successor(row_prefix) filters = [] if isinstance(filter_, six.string_types): - raise TypeError( - "Specifying filters as a string is not supported " - "by Cloud Bigtable. Use a " - "google.cloud.bigtable.row.RowFilter instead." - ) + raise TypeError('Specifying filters as a string is not supported ' + 'by Cloud Bigtable. Use a ' + 'google.cloud.bigtable.row.RowFilter instead.') elif filter_ is not None: filters.append(filter_) @@ -943,7 +912,8 @@ def _scan_filter_helper( filters.append(_columns_filter_helper(columns)) # versions == 1 since we only want the latest. 
- filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, filters=filters) + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, + filters=filters) return row_start, row_stop, filter_ @@ -967,14 +937,15 @@ def _columns_filter_helper(columns): fam_filter = FamilyNameRegexFilter(column_family_id) if column_qualifier is not None: qual_filter = ColumnQualifierRegexFilter(column_qualifier) - combined_filter = RowFilterChain(filters=[fam_filter, qual_filter]) + combined_filter = RowFilterChain( + filters=[fam_filter, qual_filter]) filters.append(combined_filter) else: filters.append(fam_filter) num_filters = len(filters) if num_filters == 0: - raise ValueError("Must have at least one filter.") + raise ValueError('Must have at least one filter.') elif num_filters == 1: return filters[0] else: @@ -985,7 +956,8 @@ def _get_row_set_object(row_start, row_stop): """Return a RowSet object for the given row_start and row_stop """ row_set = RowSet() - row_set.add_row_range_from_keys(start_key=row_start, end_key=row_stop) + row_set.add_row_range_from_keys(start_key=row_start, + end_key=row_stop) return row_set diff --git a/system_tests/attempt_system_tests.py b/system_tests/attempt_system_tests.py index e6376f0..4aa1b7f 100644 --- a/system_tests/attempt_system_tests.py +++ b/system_tests/attempt_system_tests.py @@ -58,13 +58,15 @@ from run_system_test import run_module_tests -MODULES = ("happybase",) +MODULES = ( + 'happybase', +) SCRIPTS_DIR = os.path.dirname(__file__) -ROOT_DIR = os.path.abspath(os.path.join(SCRIPTS_DIR, "..")) -ENCRYPTED_KEYFILE = os.path.join(ROOT_DIR, "system_tests", "key.json.enc") -ENCRYPTED_KEY_ENV = "encrypted_INVALID_key" -ENCRYPTED_INIT_VECTOR_ENV = "encrypted_INVALID_iv" +ROOT_DIR = os.path.abspath(os.path.join(SCRIPTS_DIR, '..')) +ENCRYPTED_KEYFILE = os.path.join(ROOT_DIR, 'system_tests', 'key.json.enc') +ENCRYPTED_KEY_ENV = 'encrypted_INVALID_key' +ENCRYPTED_INIT_VECTOR_ENV = 'encrypted_INVALID_iv' ALL_MODULES = object() # Sentinel for argparser @@ -78,12 +80,10 @@ def check_environment(): is running in Travis and the second indicates if the current build is a non-PR for a merge to master. """ - if os.getenv("TRAVIS") == "true": + if os.getenv('TRAVIS') == 'true': is_travis = True - non_pr = ( - os.getenv("TRAVIS_PULL_REQUEST") == "false" - and os.getenv("TRAVIS_BRANCH") == "master" - ) + non_pr = (os.getenv('TRAVIS_PULL_REQUEST') == 'false' and + os.getenv('TRAVIS_BRANCH') == 'master') else: is_travis = non_pr = False @@ -92,27 +92,20 @@ def check_environment(): def decrypt_keyfile(): """Decrypt a keyfile.""" - print("Running in Travis during merge, decrypting stored " "key file.") + print('Running in Travis during merge, decrypting stored ' + 'key file.') encrypted_key = os.getenv(ENCRYPTED_KEY_ENV) encrypted_iv = os.getenv(ENCRYPTED_INIT_VECTOR_ENV) out_file = os.getenv(CREDENTIALS) # Convert encrypted key file into decrypted file to be used. - subprocess.call( - [ - "openssl", - "aes-256-cbc", - "-K", - encrypted_key, - "-iv", - encrypted_iv, - "-in", - ENCRYPTED_KEYFILE, - "-out", - out_file, - "-d", - ] - ) + subprocess.call([ + 'openssl', 'aes-256-cbc', + '-K', encrypted_key, + '-iv', encrypted_iv, + '-in', ENCRYPTED_KEYFILE, + '-out', out_file, '-d' + ]) def prepare_to_run(): @@ -131,7 +124,8 @@ def prepare_to_run(): # On a Travis PR, exit the program. 
     if not non_pr:
-        print("Running in Travis during non-merge to master, " "doing nothing.")
+        print('Running in Travis during non-merge to master, '
+              'doing nothing.')
         sys.exit(0)
 
     # On a Travis build for a merge commit to master, decrypt.
@@ -140,9 +134,12 @@ def get_parser():
     """Get an argument parser to determine a list of packages."""
-    parser = argparse.ArgumentParser(description="google-cloud tests runner.")
-    help_msg = "List of packages to be tested. " "If left blank, tests all packages."
-    parser.add_argument("packages", nargs="*", default=ALL_MODULES, help=help_msg)
+    parser = argparse.ArgumentParser(
+        description='google-cloud tests runner.')
+    help_msg = ('List of packages to be tested. '
+                'If left blank, tests all packages.')
+    parser.add_argument('packages', nargs='*',
+                        default=ALL_MODULES, help=help_msg)
     return parser
 
@@ -162,7 +159,7 @@ def get_modules():
             invalid.append(package)
 
     if invalid:
-        msg = "No system test for packages: " + ", ".join(invalid)
+        msg = 'No system test for packages: ' + ', '.join(invalid)
         print(msg, file=sys.stderr)
         sys.exit(1)
 
@@ -184,5 +181,5 @@ def main():
         sys.exit(failed_modules)
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
diff --git a/system_tests/happybase.py b/system_tests/happybase.py
index 76e7d35..7c9aad1 100644
--- a/system_tests/happybase.py
+++ b/system_tests/happybase.py
@@ -29,38 +29,36 @@
 from system_test_utils import unique_resource_id
 
 
-_PACK_I64 = struct.Struct(">q").pack
+_PACK_I64 = struct.Struct('>q').pack
 _FIRST_ELT = operator.itemgetter(0)
-LOCATION_ID = "us-central1-c"
+LOCATION_ID = 'us-central1-c'
 # NOTE: Avoid using the same name as in bigtable.py
-INSTANCE_ID = "gcl-hb" + unique_resource_id("-")
-CLUSTER_ID = "gcl-hb-c1" + unique_resource_id("-")
+INSTANCE_ID = 'gcl-hb' + unique_resource_id('-')
+CLUSTER_ID = 'gcl-hb-c1' + unique_resource_id('-')
 SERVER_NODES = 3
-TABLE_NAME = "table-name"
-ALT_TABLE_NAME = "other-table"
+TABLE_NAME = 'table-name'
+ALT_TABLE_NAME = 'other-table'
 TTL_FOR_TEST = 3
-COL_FAM1 = "cf1"
-COL_FAM2 = "cf2"
-COL_FAM3 = "cf3"
+COL_FAM1 = 'cf1'
+COL_FAM2 = 'cf2'
+COL_FAM3 = 'cf3'
 FAMILIES = {
-    COL_FAM1: {"max_versions": 10},
-    COL_FAM2: {"max_versions": 1, "time_to_live": TTL_FOR_TEST},
+    COL_FAM1: {'max_versions': 10},
+    COL_FAM2: {'max_versions': 1, 'time_to_live': TTL_FOR_TEST},
     COL_FAM3: {},  # use defaults
 }
-ROW_KEY1 = b"row-key1"
-ROW_KEY2 = b"row-key2a"
-ROW_KEY3 = b"row-key2b"
-COL1 = (COL_FAM1 + ":qual1").encode("utf-8")
-COL2 = (COL_FAM1 + ":qual2").encode("utf-8")
-COL3 = (COL_FAM2 + ":qual1").encode("utf-8")
-COL4 = (COL_FAM3 + ":qual3").encode("utf-8")
-
-LABEL_KEY = u"python-system-happybase"
-label_stamp = (
-    datetime.datetime.utcnow()
-    .replace(microsecond=0, tzinfo=UTC)
-    .strftime("%Y-%m-%dt%H-%M-%S")
-)
+ROW_KEY1 = b'row-key1'
+ROW_KEY2 = b'row-key2a'
+ROW_KEY3 = b'row-key2b'
+COL1 = (COL_FAM1 + ':qual1').encode('utf-8')
+COL2 = (COL_FAM1 + ':qual2').encode('utf-8')
+COL3 = (COL_FAM2 + ':qual1').encode('utf-8')
+COL4 = (COL_FAM3 + ':qual3').encode('utf-8')
+
+LABEL_KEY = u'python-system-happybase'
+label_stamp = datetime.datetime.utcnow() \
+              .replace(microsecond=0, tzinfo=UTC,) \
+              .strftime("%Y-%m-%dt%H-%M-%S")
 LABELS = {LABEL_KEY: str(label_stamp)}
 
 
@@ -70,7 +68,6 @@ class Config(object):
     This is a mutable stand-in to allow test set-up to modify
     global state.
     """
-
     CONNECTION = None
     TABLE = None
 
@@ -107,9 +104,8 @@ def _wait_until_complete(operation, max_attempts=5):
 def set_connection():
     client = client_mod.Client(admin=True)
     instance = client.instance(INSTANCE_ID, labels=LABELS)
-    cluster = instance.cluster(
-        CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVER_NODES
-    )
+    cluster = instance.cluster(CLUSTER_ID, location_id=LOCATION_ID,
+                               serve_nodes=SERVER_NODES)
     operation = instance.create(clusters=[cluster])
     operation.result(10)
     Config.CONNECTION = Connection(instance=instance)
@@ -128,6 +124,7 @@ def tearDownModule():
 
 
 class TestConnection(unittest.TestCase):
+
     def test_create_and_delete_table(self):
         connection = Config.CONNECTION
 
@@ -148,6 +145,7 @@ def test_create_table_failure(self):
 
 
 class BaseTableTest(unittest.TestCase):
+
     def setUp(self):
         self.rows_to_delete = []
 
@@ -157,6 +155,7 @@ def tearDown(self):
 
 
 class TestTable_families(BaseTableTest):
+
     def test_families(self):
         families = Config.TABLE.families()
 
@@ -168,6 +167,7 @@ def test_families(self):
 
 
 class TestTable_row(BaseTableTest):
+
     def test_row_when_empty(self):
         row1 = Config.TABLE.row(ROW_KEY1)
         row2 = Config.TABLE.row(ROW_KEY2)
@@ -177,11 +177,16 @@ def test_row_when_empty(self):
 
     def test_row_with_columns(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
-        value4 = b"value4"
-        row1_data = {COL1: value1, COL2: value2, COL3: value3, COL4: value4}
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
+        value4 = b'value4'
+        row1_data = {
+            COL1: value1,
+            COL2: value2,
+            COL3: value3,
+            COL4: value4,
+        }
 
         # Need to clean-up row1 after.
         self.rows_to_delete.append(ROW_KEY1)
@@ -193,25 +198,31 @@ def test_row_with_columns(self):
 
         # Pick out specific columns.
         row1_diff_fams = table.row(ROW_KEY1, columns=[COL1, COL4])
-        self.assertEqual(row1_diff_fams, {COL1: value1, COL4: value4})
+        self.assertEqual(row1_diff_fams, {COL1: value1,
+                                          COL4: value4})
         row1_single_col = table.row(ROW_KEY1, columns=[COL3])
         self.assertEqual(row1_single_col, {COL3: value3})
         row1_col_fam = table.row(ROW_KEY1, columns=[COL_FAM1])
-        self.assertEqual(row1_col_fam, {COL1: value1, COL2: value2})
+        self.assertEqual(row1_col_fam, {COL1: value1,
+                                        COL2: value2})
         row1_fam_qual_overlap1 = table.row(ROW_KEY1, columns=[COL1, COL_FAM1])
-        self.assertEqual(row1_fam_qual_overlap1, {COL1: value1, COL2: value2})
+        self.assertEqual(row1_fam_qual_overlap1, {COL1: value1,
+                                                  COL2: value2})
         row1_fam_qual_overlap2 = table.row(ROW_KEY1, columns=[COL_FAM1, COL1])
-        self.assertEqual(row1_fam_qual_overlap2, {COL1: value1, COL2: value2})
-        row1_multiple_col_fams = table.row(ROW_KEY1, columns=[COL_FAM1, COL_FAM2])
-        self.assertEqual(
-            row1_multiple_col_fams, {COL1: value1, COL2: value2, COL3: value3}
-        )
+        self.assertEqual(row1_fam_qual_overlap2,
+                         {COL1: value1, COL2: value2})
+        row1_multiple_col_fams = table.row(ROW_KEY1,
+                                           columns=[COL_FAM1, COL_FAM2])
+        self.assertEqual(row1_multiple_col_fams,
+                         {COL1: value1,
+                          COL2: value2,
+                          COL3: value3})
 
     def test_row_with_timestamp(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
 
         # Need to clean-up row1 after.
         self.rows_to_delete.append(ROW_KEY1)
@@ -225,25 +236,37 @@ def test_row_with_timestamp(self):
         ts2 = row1[COL2][1]
         ts3 = row1[COL3][1]
 
-        expected_row = {COL1: (value1, ts1), COL2: (value2, ts2), COL3: (value3, ts3)}
+        expected_row = {
+            COL1: (value1, ts1),
+            COL2: (value2, ts2),
+            COL3: (value3, ts3),
+        }
         self.assertEqual(row1, expected_row)
 
         # Make sure the timestamps are (strictly) ascending.
         self.assertTrue(ts1 < ts2 < ts3)
 
         # Use timestamps to retrieve row.
-        first_two = table.row(ROW_KEY1, timestamp=ts2 + 1, include_timestamp=True)
-        self.assertEqual(first_two, {COL1: (value1, ts1), COL2: (value2, ts2)})
-        first_one = table.row(ROW_KEY1, timestamp=ts2, include_timestamp=True)
-        self.assertEqual(first_one, {COL1: (value1, ts1)})
+        first_two = table.row(ROW_KEY1, timestamp=ts2 + 1,
+                              include_timestamp=True)
+        self.assertEqual(first_two, {
+            COL1: (value1, ts1),
+            COL2: (value2, ts2),
+        })
+        first_one = table.row(ROW_KEY1, timestamp=ts2,
+                              include_timestamp=True)
+        self.assertEqual(first_one, {
+            COL1: (value1, ts1),
+        })
 
 
 class TestTable_rows(BaseTableTest):
+
     def test_rows(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
         row1_data = {COL1: value1, COL2: value2}
         row2_data = {COL1: value3}
 
@@ -260,9 +283,9 @@ def test_rows(self):
 
     def test_rows_with_returned_timestamps(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
         row1_data = {COL1: value1, COL2: value2}
         row2_data = {COL1: value3}
 
@@ -273,9 +296,8 @@ def test_rows_with_returned_timestamps(self):
             batch.put(ROW_KEY1, row1_data)
             batch.put(ROW_KEY2, row2_data)
 
-        rows = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True), key=_FIRST_ELT
-        )
+        rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True),
+                      key=_FIRST_ELT)
         row1, row2 = rows
         self.assertEqual(row1[0], ROW_KEY1)
         self.assertEqual(row2[0], ROW_KEY2)
@@ -286,7 +308,8 @@ def test_rows_with_returned_timestamps(self):
         ts = row1[COL1][1]
 
         # All will have the same timestamp since we used batch.
-        expected_row1_result = {COL1: (value1, ts), COL2: (value2, ts)}
+        expected_row1_result = {COL1: (value1, ts),
+                                COL2: (value2, ts)}
         self.assertEqual(row1, expected_row1_result)
         # NOTE: This method was written before Cloud Bigtable had the concept
         #       of batching, so each mutation is sent individually. (This
@@ -301,9 +324,9 @@ def test_rows_with_returned_timestamps(self):
 
     def test_rows_with_columns(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
         row1_data = {COL1: value1, COL2: value2}
         row2_data = {COL1: value3}
 
@@ -314,9 +337,8 @@ def test_rows_with_columns(self):
         table.put(ROW_KEY2, row2_data)
 
         # Filter a single column present in both rows.
-        rows_col1 = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], columns=[COL1]), key=_FIRST_ELT
-        )
+        rows_col1 = sorted(table.rows([ROW_KEY1, ROW_KEY2], columns=[COL1]),
+                           key=_FIRST_ELT)
         row1, row2 = rows_col1
         self.assertEqual(row1, (ROW_KEY1, {COL1: value1}))
         self.assertEqual(row2, (ROW_KEY2, {COL1: value3}))
@@ -327,8 +349,8 @@ def test_rows_with_columns(self):
 
         # Filter a column family.
         rows_col_fam1 = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], columns=[COL_FAM1]), key=_FIRST_ELT
-        )
+            table.rows([ROW_KEY1, ROW_KEY2], columns=[COL_FAM1]),
+            key=_FIRST_ELT)
         row1, row2 = rows_col_fam1
         self.assertEqual(row1, (ROW_KEY1, row1_data))
         self.assertEqual(row2, (ROW_KEY2, row2_data))
@@ -338,27 +360,27 @@ def test_rows_with_columns(self):
         self.assertEqual(rows_col_fam2, [])
 
         # Filter a column family that overlaps with a column.
-        rows_col_fam_overlap1 = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], columns=[COL1, COL_FAM1]), key=_FIRST_ELT
-        )
+        rows_col_fam_overlap1 = sorted(table.rows([ROW_KEY1, ROW_KEY2],
+                                                  columns=[COL1, COL_FAM1]),
+                                       key=_FIRST_ELT)
         row1, row2 = rows_col_fam_overlap1
         self.assertEqual(row1, (ROW_KEY1, row1_data))
         self.assertEqual(row2, (ROW_KEY2, row2_data))
 
         # Filter a column family that overlaps with a column (opposite order).
-        rows_col_fam_overlap2 = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], columns=[COL_FAM1, COL1]), key=_FIRST_ELT
-        )
+        rows_col_fam_overlap2 = sorted(table.rows([ROW_KEY1, ROW_KEY2],
+                                                  columns=[COL_FAM1, COL1]),
+                                       key=_FIRST_ELT)
         row1, row2 = rows_col_fam_overlap2
         self.assertEqual(row1, (ROW_KEY1, row1_data))
         self.assertEqual(row2, (ROW_KEY2, row2_data))
 
     def test_rows_with_timestamp(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
-        value4 = b"value4"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
+        value4 = b'value4'
 
         # Need to clean-up row1 and row2 after.
         self.rows_to_delete.append(ROW_KEY1)
@@ -369,9 +391,8 @@ def test_rows_with_timestamp(self):
         table.put(ROW_KEY1, {COL4: value4})
 
         # Just grab the timestamps
-        rows = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True), key=_FIRST_ELT
-        )
+        rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True),
+                      key=_FIRST_ELT)
         row1, row2 = rows
         self.assertEqual(row1[0], ROW_KEY1)
         self.assertEqual(row2[0], ROW_KEY2)
@@ -386,37 +407,39 @@ def test_rows_with_timestamp(self):
         self.assertTrue(ts1 < ts2 < ts3 < ts4)
 
         # Rows before the third timestamp (assumes exclusive endpoint).
-        rows = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts3, include_timestamp=True),
-            key=_FIRST_ELT,
-        )
+        rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts3,
+                                 include_timestamp=True),
+                      key=_FIRST_ELT)
         row1, row2 = rows
         self.assertEqual(row1, (ROW_KEY1, {COL1: (value1, ts1)}))
         self.assertEqual(row2, (ROW_KEY2, {COL1: (value2, ts2)}))
 
         # All writes (bump the exclusive endpoint by 1 millisecond).
-        rows = sorted(
-            table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts4 + 1, include_timestamp=True),
-            key=_FIRST_ELT,
-        )
+        rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts4 + 1,
+                                 include_timestamp=True),
+                      key=_FIRST_ELT)
         row1, row2 = rows
-        row1_all_data = {COL1: (value1, ts1), COL2: (value3, ts3), COL4: (value4, ts4)}
+        row1_all_data = {
+            COL1: (value1, ts1),
+            COL2: (value3, ts3),
+            COL4: (value4, ts4),
+        }
         self.assertEqual(row1, (ROW_KEY1, row1_all_data))
         self.assertEqual(row2, (ROW_KEY2, {COL1: (value2, ts2)}))
 
         # First three writes, restricted to column 2.
-        rows = table.rows(
-            [ROW_KEY1, ROW_KEY2], timestamp=ts4, columns=[COL2], include_timestamp=True
-        )
+        rows = table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts4,
+                          columns=[COL2], include_timestamp=True)
         self.assertEqual(rows, [(ROW_KEY1, {COL2: (value3, ts3)})])
 
 
 class TestTable_cells(BaseTableTest):
+
     def test_cells(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
 
         # Need to clean-up row1 after.
         self.rows_to_delete.append(ROW_KEY1)
@@ -435,32 +458,35 @@ def test_cells(self):
         ts3 = all_cells[0][1]
         ts2 = all_cells[1][1]
         ts1 = all_cells[2][1]
-        self.assertEqual(all_cells, [(value3, ts3), (value2, ts2), (value1, ts1)])
+        self.assertEqual(all_cells,
+                         [(value3, ts3), (value2, ts2), (value1, ts1)])
 
         # Limit to the two latest cells.
-        latest_two = table.cells(ROW_KEY1, COL1, include_timestamp=True, versions=2)
+        latest_two = table.cells(ROW_KEY1, COL1, include_timestamp=True,
+                                 versions=2)
         self.assertEqual(latest_two, [(value3, ts3), (value2, ts2)])
 
         # Limit to cells before the 2nd timestamp (inclusive).
-        first_two = table.cells(
-            ROW_KEY1, COL1, include_timestamp=True, timestamp=ts2 + 1
-        )
+        first_two = table.cells(ROW_KEY1, COL1, include_timestamp=True,
+                                timestamp=ts2 + 1)
         self.assertEqual(first_two, [(value2, ts2), (value1, ts1)])
 
         # Limit to cells before the 2nd timestamp (exclusive).
-        first_cell = table.cells(ROW_KEY1, COL1, include_timestamp=True, timestamp=ts2)
+        first_cell = table.cells(ROW_KEY1, COL1, include_timestamp=True,
+                                 timestamp=ts2)
         self.assertEqual(first_cell, [(value1, ts1)])
 
 
 class TestTable_scan(BaseTableTest):
+
     def test_scan_when_empty(self):
         scan_result = list(Config.TABLE.scan())
         self.assertEqual(scan_result, [])
 
     def test_scan_single_row(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
+        value1 = b'value1'
+        value2 = b'value2'
         row1_data = {COL1: value1, COL2: value2}
 
         # Need to clean-up row1 after.
         self.rows_to_delete.append(ROW_KEY1)
@@ -471,7 +497,8 @@ def test_scan_single_row(self):
         self.assertEqual(scan_result, [(ROW_KEY1, row1_data)])
 
         scan_result_cols = list(table.scan(columns=[COL1]))
-        self.assertEqual(scan_result_cols, [(ROW_KEY1, {COL1: value1})])
+        self.assertEqual(scan_result_cols, [(ROW_KEY1,
+                                             {COL1: value1})])
 
         scan_result_ts = list(table.scan(include_timestamp=True))
         self.assertEqual(len(scan_result_ts), 1)
@@ -479,16 +506,18 @@ def test_scan_single_row(self):
         self.assertEqual(only_row[0], ROW_KEY1)
         row_values = only_row[1]
         ts = row_values[COL1][1]
-        self.assertEqual(row_values, {COL1: (value1, ts), COL2: (value2, ts)})
+        self.assertEqual(row_values,
+                         {COL1: (value1, ts),
+                          COL2: (value2, ts)})
 
     def test_scan_filters(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
-        value4 = b"value4"
-        value5 = b"value5"
-        value6 = b"value6"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
+        value4 = b'value4'
+        value5 = b'value5'
+        value6 = b'value6'
         row1_data = {COL1: value1, COL2: value2}
         row2_data = {COL2: value3, COL3: value4}
         row3_data = {COL3: value5, COL4: value6}
 
         # Need to clean-up row1/2/3 after.
@@ -503,22 +532,26 @@ def test_scan_filters(self):
 
         # Basic scan (no filters)
         scan_result = list(table.scan())
-        self.assertEqual(
-            scan_result,
-            [(ROW_KEY1, row1_data), (ROW_KEY2, row2_data), (ROW_KEY3, row3_data)],
-        )
+        self.assertEqual(scan_result, [
+            (ROW_KEY1, row1_data),
+            (ROW_KEY2, row2_data),
+            (ROW_KEY3, row3_data),
+        ])
 
         # Limit the size of the scan
         scan_result = list(table.scan(limit=1))
-        self.assertEqual(scan_result, [(ROW_KEY1, row1_data)])
+        self.assertEqual(scan_result, [
+            (ROW_KEY1, row1_data),
+        ])
 
         # Scan with a row prefix.
         prefix = ROW_KEY2[:-1]
         self.assertEqual(prefix, ROW_KEY3[:-1])
         scan_result_prefixed = list(table.scan(row_prefix=prefix))
-        self.assertEqual(
-            scan_result_prefixed, [(ROW_KEY2, row2_data), (ROW_KEY3, row3_data)]
-        )
+        self.assertEqual(scan_result_prefixed, [
+            (ROW_KEY2, row2_data),
+            (ROW_KEY3, row3_data),
+        ])
 
         # Make sure our keys are sorted in order
         row_keys = [ROW_KEY1, ROW_KEY2, ROW_KEY3]
@@ -526,31 +559,33 @@ def test_scan_filters(self):
 
         # row_start alone (inclusive)
         scan_result_row_start = list(table.scan(row_start=ROW_KEY2))
-        self.assertEqual(
-            scan_result_row_start, [(ROW_KEY2, row2_data), (ROW_KEY3, row3_data)]
-        )
+        self.assertEqual(scan_result_row_start, [
+            (ROW_KEY2, row2_data),
+            (ROW_KEY3, row3_data),
+        ])
 
         # row_stop alone (exclusive)
         scan_result_row_stop = list(table.scan(row_stop=ROW_KEY2))
-        self.assertEqual(scan_result_row_stop, [(ROW_KEY1, row1_data)])
+        self.assertEqual(scan_result_row_stop, [
+            (ROW_KEY1, row1_data),
+        ])
 
         # Both row_start and row_stop
         scan_result_row_stop_and_start = list(
-            table.scan(row_start=ROW_KEY1, row_stop=ROW_KEY3)
-        )
-        self.assertEqual(
-            scan_result_row_stop_and_start,
-            [(ROW_KEY1, row1_data), (ROW_KEY2, row2_data)],
-        )
+            table.scan(row_start=ROW_KEY1, row_stop=ROW_KEY3))
+        self.assertEqual(scan_result_row_stop_and_start, [
+            (ROW_KEY1, row1_data),
+            (ROW_KEY2, row2_data),
+        ])
 
     def test_scan_timestamp(self):
         table = Config.TABLE
-        value1 = b"value1"
-        value2 = b"value2"
-        value3 = b"value3"
-        value4 = b"value4"
-        value5 = b"value5"
-        value6 = b"value6"
+        value1 = b'value1'
+        value2 = b'value2'
+        value3 = b'value3'
+        value4 = b'value4'
+        value5 = b'value5'
+        value6 = b'value6'
 
         # Need to clean-up row1/2/3 after.
self.rows_to_delete.append(ROW_KEY1) @@ -585,39 +620,43 @@ def test_scan_timestamp(self): ts5 = row3[COL3][1] ts6 = row1[COL1][1] - self.assertEqual(row1, {COL1: (value1, ts6), COL2: (value2, ts4)}) - self.assertEqual(row2, {COL2: (value3, ts3), COL3: (value4, ts2)}) - self.assertEqual(row3, {COL3: (value5, ts5), COL4: (value6, ts1)}) + self.assertEqual(row1, {COL1: (value1, ts6), + COL2: (value2, ts4)}) + self.assertEqual(row2, {COL2: (value3, ts3), + COL3: (value4, ts2)}) + self.assertEqual(row3, {COL3: (value5, ts5), + COL4: (value6, ts1)}) # All cells before ts1 (exclusive) - scan_result_before_ts1 = list(table.scan(timestamp=ts1, include_timestamp=True)) + scan_result_before_ts1 = list(table.scan(timestamp=ts1, + include_timestamp=True)) self.assertEqual(scan_result_before_ts1, []) # All cells before ts2 (inclusive) - scan_result_before_ts2 = list( - table.scan(timestamp=ts2 + 1, include_timestamp=True) - ) - self.assertEqual( - scan_result_before_ts2, - [(ROW_KEY2, {COL3: (value4, ts2)}), (ROW_KEY3, {COL4: (value6, ts1)})], - ) + scan_result_before_ts2 = list(table.scan(timestamp=ts2 + 1, + include_timestamp=True)) + self.assertEqual(scan_result_before_ts2, [ + (ROW_KEY2, {COL3: (value4, ts2)}), + (ROW_KEY3, {COL4: (value6, ts1)}), + ]) # All cells before ts6 (exclusive) - scan_result_before_ts6 = list(table.scan(timestamp=ts6, include_timestamp=True)) - self.assertEqual( - scan_result_before_ts6, - [ - (ROW_KEY1, {COL2: (value2, ts4)}), - (ROW_KEY2, {COL2: (value3, ts3), COL3: (value4, ts2)}), - (ROW_KEY3, {COL3: (value5, ts5), COL4: (value6, ts1)}), - ], - ) + scan_result_before_ts6 = list(table.scan(timestamp=ts6, + include_timestamp=True)) + self.assertEqual(scan_result_before_ts6, [ + (ROW_KEY1, {COL2: (value2, ts4)}), + (ROW_KEY2, {COL2: (value3, ts3), + COL3: (value4, ts2)}), + (ROW_KEY3, {COL3: (value5, ts5), + COL4: (value6, ts1)}), + ]) class TestTable_put(BaseTableTest): + def test_put(self): - value1 = b"value1" - value2 = b"value2" + value1 = b'value1' + value2 = b'value2' row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. @@ -633,15 +672,13 @@ def test_put(self): timestamp2 = row1[COL2][1] self.assertEqual(timestamp1, timestamp2) - row1_data_with_timestamps = { - COL1: (value1, timestamp1), - COL2: (value2, timestamp2), - } + row1_data_with_timestamps = {COL1: (value1, timestamp1), + COL2: (value2, timestamp2)} self.assertEqual(row1, row1_data_with_timestamps) def test_put_with_timestamp(self): - value1 = b"value1" - value2 = b"value2" + value1 = b'value1' + value2 = b'value2' row1_data = {COL1: value1, COL2: value2} ts = 1461367402 @@ -651,15 +688,17 @@ def test_put_with_timestamp(self): # Check again, but this time with timestamps. row1 = Config.TABLE.row(ROW_KEY1, include_timestamp=True) - row1_data_with_timestamps = {COL1: (value1, ts), COL2: (value2, ts)} + row1_data_with_timestamps = {COL1: (value1, ts), + COL2: (value2, ts)} self.assertEqual(row1, row1_data_with_timestamps) class TestTable_delete(BaseTableTest): + def test_delete(self): table = Config.TABLE - value1 = b"value1" - value2 = b"value2" + value1 = b'value1' + value2 = b'value2' row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. @@ -675,8 +714,8 @@ def test_delete(self): def test_delete_with_columns(self): table = Config.TABLE - value1 = b"value1" - value2 = b"value2" + value1 = b'value1' + value2 = b'value2' row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. 
@@ -692,9 +731,9 @@ def test_delete_with_columns(self): def test_delete_with_column_family(self): table = Config.TABLE - value1 = b"value1" - value2 = b"value2" - value3 = b"value3" + value1 = b'value1' + value2 = b'value2' + value3 = b'value3' row1_data = {COL1: value1, COL2: value2, COL4: value3} # Need to clean-up row1 after. @@ -710,8 +749,8 @@ def test_delete_with_column_family(self): def test_delete_with_columns_family_overlap(self): table = Config.TABLE - value1 = b"value1" - value2 = b"value2" + value1 = b'value1' + value2 = b'value2' row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. @@ -737,8 +776,8 @@ def test_delete_with_columns_family_overlap(self): def test_delete_with_timestamp(self): table = Config.TABLE - value1 = b"value1" - value2 = b"value2" + value1 = b'value1' + value2 = b'value2' # Need to clean-up row1 after. self.rows_to_delete.append(ROW_KEY1) @@ -767,12 +806,13 @@ def test_delete_with_timestamp(self): # creating Batch._delete_range. table.delete(ROW_KEY1, columns=[COL1, COL2], timestamp=ts1) row1_after_incl_delete = table.row(ROW_KEY1, include_timestamp=True) - self.assertEqual(row1_after_incl_delete, {COL2: (value2, ts2)}) + self.assertEqual(row1_after_incl_delete, + {COL2: (value2, ts2)}) def test_delete_with_columns_and_timestamp(self): table = Config.TABLE - value1 = b"value1" - value2 = b"value2" + value1 = b'value1' + value2 = b'value2' # Need to clean-up row1 after. self.rows_to_delete.append(ROW_KEY1) @@ -805,6 +845,7 @@ def test_delete_with_columns_and_timestamp(self): class TestTableCounterMethods(BaseTableTest): + def test_counter_get(self): table = Config.TABLE @@ -815,7 +856,8 @@ def test_counter_get(self): initial_counter = table.counter_get(ROW_KEY1, COL1) self.assertEqual(initial_counter, 0) - self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), {COL1: _PACK_I64(0)}) + self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), + {COL1: _PACK_I64(0)}) def test_counter_inc(self): table = Config.TABLE @@ -832,9 +874,8 @@ def test_counter_inc(self): self.assertEqual(updated_counter, inc_value) # Check that the value is set (does not seem to occur on HBase). - self.assertEqual( - table.row(ROW_KEY1, columns=[COL1]), {COL1: _PACK_I64(inc_value)} - ) + self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), + {COL1: _PACK_I64(inc_value)}) def test_counter_dec(self): table = Config.TABLE @@ -851,6 +892,5 @@ def test_counter_dec(self): self.assertEqual(updated_counter, -dec_value) # Check that the value is set (does not seem to occur on HBase). - self.assertEqual( - table.row(ROW_KEY1, columns=[COL1]), {COL1: _PACK_I64(-dec_value)} - ) + self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), + {COL1: _PACK_I64(-dec_value)}) diff --git a/system_tests/retry.py b/system_tests/retry.py index 674c94b..d4694f6 100644 --- a/system_tests/retry.py +++ b/system_tests/retry.py @@ -47,8 +47,8 @@ class RetryBase(object): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. """ - - def __init__(self, max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, logger=None): + def __init__(self, max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): self.max_tries = max_tries self.delay = delay self.backoff = backoff @@ -79,16 +79,9 @@ class RetryErrors(RetryBase): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. 
""" - - def __init__( - self, - exception, - error_predicate=_retry_all, - max_tries=MAX_TRIES, - delay=DELAY, - backoff=BACKOFF, - logger=None, - ): + def __init__(self, exception, error_predicate=_retry_all, + max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): super(RetryErrors, self).__init__(max_tries, delay, backoff, logger) self.exception = exception self.error_predicate = error_predicate @@ -105,11 +98,9 @@ def wrapped_function(*args, **kwargs): if not self.error_predicate(caught_exception): raise - delay = self.delay * self.backoff ** tries - msg = "%s, Trying again in %d seconds..." % ( - caught_exception, - delay, - ) + delay = self.delay * self.backoff**tries + msg = ("%s, Trying again in %d seconds..." % + (caught_exception, delay)) self.logger(msg) time.sleep(delay) @@ -139,15 +130,9 @@ class RetryResult(RetryBase): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. """ - - def __init__( - self, - result_predicate, - max_tries=MAX_TRIES, - delay=DELAY, - backoff=BACKOFF, - logger=None, - ): + def __init__(self, result_predicate, + max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): super(RetryResult, self).__init__(max_tries, delay, backoff, logger) self.result_predicate = result_predicate @@ -160,11 +145,9 @@ def wrapped_function(*args, **kwargs): if self.result_predicate(result): return result - delay = self.delay * self.backoff ** tries + delay = self.delay * self.backoff**tries msg = "%s. Trying again in %d seconds..." % ( - self.result_predicate.__name__, - delay, - ) + self.result_predicate.__name__, delay,) self.logger(msg) time.sleep(delay) @@ -194,20 +177,15 @@ class RetryInstanceState(RetryBase): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. """ - - def __init__( - self, - instance_predicate, - max_tries=MAX_TRIES, - delay=DELAY, - backoff=BACKOFF, - logger=None, - ): - super(RetryInstanceState, self).__init__(max_tries, delay, backoff, logger) + def __init__(self, instance_predicate, + max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): + super(RetryInstanceState, self).__init__( + max_tries, delay, backoff, logger) self.instance_predicate = instance_predicate def __call__(self, to_wrap): - instance = to_wrap.__self__ # only instance methods allowed + instance = to_wrap.__self__ # only instance methods allowed @wraps(to_wrap) def wrapped_function(*args, **kwargs): @@ -217,11 +195,9 @@ def wrapped_function(*args, **kwargs): if self.instance_predicate(instance): return result - delay = self.delay * self.backoff ** tries + delay = self.delay * self.backoff**tries msg = "%s. Trying again in %d seconds..." % ( - self.instance_predicate.__name__, - delay, - ) + self.instance_predicate.__name__, delay,) self.logger(msg) time.sleep(delay) diff --git a/system_tests/run_system_test.py b/system_tests/run_system_test.py index e232202..2ee4235 100644 --- a/system_tests/run_system_test.py +++ b/system_tests/run_system_test.py @@ -20,7 +20,9 @@ import system_test_utils -TEST_MODULES = {"happybase": happybase} +TEST_MODULES = { + 'happybase': happybase, +} class FailedSystemTestModule(Exception): @@ -29,21 +31,14 @@ class FailedSystemTestModule(Exception): def get_parser(): parser = argparse.ArgumentParser( - description="Google Cloud test runner against actual project." 
- ) + description='Google Cloud test runner against actual project.') + parser.add_argument('--package', dest='package', + choices=TEST_MODULES.keys(), + default='datastore', help='Package to be tested.') parser.add_argument( - "--package", - dest="package", - choices=TEST_MODULES.keys(), - default="datastore", - help="Package to be tested.", - ) - parser.add_argument( - "--ignore-requirements", - dest="ignore_requirements", - action="store_true", - help="Ignore the credentials requirement for the test.", - ) + '--ignore-requirements', + dest='ignore_requirements', action='store_true', + help='Ignore the credentials requirement for the test.') return parser @@ -68,10 +63,11 @@ def main(): parser = get_parser() args = parser.parse_args() try: - run_module_tests(args.package, ignore_requirements=args.ignore_requirements) + run_module_tests(args.package, + ignore_requirements=args.ignore_requirements) except FailedSystemTestModule: sys.exit(1) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/system_tests/system_test_utils.py b/system_tests/system_test_utils.py index 79c8d47..cf913d4 100644 --- a/system_tests/system_test_utils.py +++ b/system_tests/system_test_utils.py @@ -44,9 +44,10 @@ def create_scoped_required(): def check_environ(): err_msg = None if CREDENTIALS is None: - err_msg = "\nMissing variables: " + TEST_CREDENTIALS + err_msg = '\nMissing variables: ' + TEST_CREDENTIALS elif not os.path.isfile(CREDENTIALS): - err_msg = "\nThe %s path %r is not a file." % (TEST_CREDENTIALS, CREDENTIALS) + err_msg = '\nThe %s path %r is not a file.' % (TEST_CREDENTIALS, + CREDENTIALS) if err_msg is not None: msg = ENVIRON_ERROR_MSG + err_msg @@ -54,14 +55,15 @@ def check_environ(): sys.exit(1) -def unique_resource_id(delimiter="_"): +def unique_resource_id(delimiter='_'): """A unique identifier for a resource. Intended to help locate resources created in particular testing environments and at particular times. 
""" - build_id = os.getenv("TRAVIS_BUILD_ID", "") - if build_id == "": - return "%s%d" % (delimiter, 1000 * time.time()) + build_id = os.getenv('TRAVIS_BUILD_ID', '') + if build_id == '': + return '%s%d' % (delimiter, 1000 * time.time()) - return "%s%s%s%d" % (delimiter, build_id, delimiter, time.time()) + return '%s%s%s%d' % (delimiter, build_id, + delimiter, time.time()) diff --git a/unit_tests/test_batch.py b/unit_tests/test_batch.py index 8744562..9c3aabe 100644 --- a/unit_tests/test_batch.py +++ b/unit_tests/test_batch.py @@ -25,9 +25,9 @@ def send(self): class TestBatch(unittest.TestCase): + def _get_target_class(self): from google.cloud.happybase.batch import Batch - return Batch def _make_one(self, *args, **kwargs): @@ -54,13 +54,12 @@ def test_constructor_explicit(self): transaction = False # Must be False when batch_size is non-null batch = self._make_one( - table, timestamp=timestamp, batch_size=batch_size, transaction=transaction - ) + table, timestamp=timestamp, + batch_size=batch_size, transaction=transaction) self.assertEqual(batch._table, table) self.assertEqual(batch._batch_size, batch_size) - self.assertEqual( - batch._timestamp, _datetime_from_microseconds(1000 * timestamp) - ) + self.assertEqual(batch._timestamp, + _datetime_from_microseconds(1000 * timestamp)) next_timestamp = _datetime_from_microseconds(1000 * (timestamp + 1)) time_range = TimestampRange(end=next_timestamp) @@ -95,7 +94,8 @@ def test_constructor_with_batch_size_and_transactional(self): batch_size = 1 transaction = True with self.assertRaises(TypeError): - self._make_one(table, batch_size=batch_size, transaction=transaction) + self._make_one( + table, batch_size=batch_size, transaction=transaction) def test_send(self): low_level_table = _MockLowLevelTable() @@ -103,8 +103,8 @@ def test_send(self): batch = self._make_one(table) batch._row_map = row_map = _MockRowMap() - row_map["row-key1"] = row1 = _MockRow() - row_map["row-key2"] = row2 = _MockRow() + row_map['row-key1'] = row1 = _MockRow() + row_map['row-key2'] = row2 = _MockRow() batch._mutation_count = 1337 self.assertEqual(row_map.clear_count, 0) @@ -174,7 +174,7 @@ def test__get_row_exists(self): table = object() batch = self._make_one(table) - row_key = "row-key" + row_key = 'row-key' row_obj = object() batch._row_map[row_key] = row_obj result = batch._get_row(row_key) @@ -193,7 +193,7 @@ def test__get_row_create_new(self): low_level_table.mock_row = mock_row = object() # Actually get the row (which creates a row via a low-level table). 
- row_key = "row-key" + row_key = 'row-key' result = batch._get_row(row_key) self.assertEqual(result, mock_row) @@ -208,19 +208,17 @@ def _put_helper(self, use_wal_none=False): table = object() batch = self._make_one(table) batch._timestamp = timestamp = object() - row_key = "row-key" + row_key = 'row-key' batch._row_map[row_key] = row = _MockRow() - col1_fam = "cf1" - col1_qual = "qual1" - value1 = "value1" - col2_fam = "cf2" - col2_qual = "qual2" - value2 = "value2" - data = { - (col1_fam + ":" + col1_qual).encode("utf-8"): value1, - (col2_fam + ":" + col2_qual).encode("utf-8"): value2, - } + col1_fam = 'cf1' + col1_qual = 'qual1' + value1 = 'value1' + col2_fam = 'cf2' + col2_qual = 'qual2' + value2 = 'value2' + data = {(col1_fam + ':' + col1_qual).encode('utf-8'): value1, + (col2_fam + ':' + col2_qual).encode('utf-8'): value2} self.assertEqual(batch._mutation_count, 0) self.assertEqual(row.set_cell_calls, []) @@ -237,12 +235,13 @@ def _put_helper(self, use_wal_none=False): ordered_calls = sorted(row.set_cell_calls, key=first_elt) cell1_args = (col1_fam, col1_qual, value1) - cell1_kwargs = {"timestamp": timestamp} + cell1_kwargs = {'timestamp': timestamp} cell2_args = (col2_fam, col2_qual, value2) - cell2_kwargs = {"timestamp": timestamp} - self.assertEqual( - ordered_calls, [(cell1_args, cell1_kwargs), (cell2_args, cell2_kwargs)] - ) + cell2_kwargs = {'timestamp': timestamp} + self.assertEqual(ordered_calls, [ + (cell1_args, cell1_kwargs), + (cell2_args, cell2_kwargs), + ]) def test_put_bad_wal(self): import warnings @@ -270,7 +269,7 @@ def _try_send(self): table = object() batch = CallTrySend(table) - row_key = "row-key" + row_key = 'row-key' batch._row_map[row_key] = _MockRow() self.assertEqual(batch._mutation_count, 0) @@ -285,25 +284,23 @@ def _delete_columns_test_helper(self, time_range=None): batch = self._make_one(table) batch._delete_range = time_range - col1_fam = "cf1" - col2_fam = "cf2" - col2_qual = "col-name" - columns = [col1_fam + ":", col2_fam + ":" + col2_qual] + col1_fam = 'cf1' + col2_fam = 'cf2' + col2_qual = 'col-name' + columns = [col1_fam + ':', col2_fam + ':' + col2_qual] row_object = _MockRow() batch._delete_columns(columns, row_object) self.assertEqual(row_object.commits, 0) cell_deleted_args = (col2_fam, col2_qual) - cell_deleted_kwargs = {"time_range": time_range} - self.assertEqual( - row_object.delete_cell_calls, [(cell_deleted_args, cell_deleted_kwargs)] - ) + cell_deleted_kwargs = {'time_range': time_range} + self.assertEqual(row_object.delete_cell_calls, + [(cell_deleted_args, cell_deleted_kwargs)]) fam_deleted_args = (col1_fam,) - fam_deleted_kwargs = {"columns": row_object.ALL_COLUMNS} - self.assertEqual( - row_object.delete_cells_calls, [(fam_deleted_args, fam_deleted_kwargs)] - ) + fam_deleted_kwargs = {'columns': row_object.ALL_COLUMNS} + self.assertEqual(row_object.delete_cells_calls, + [(fam_deleted_args, fam_deleted_kwargs)]) def test__delete_columns(self): self._delete_columns_test_helper() @@ -317,7 +314,7 @@ def _delete_entire_row_helper(self, use_wal_none=False): table = object() batch = self._make_one(table) - row_key = "row-key" + row_key = 'row-key' batch._row_map[row_key] = row = _MockRow() self.assertEqual(row.deletes, 0) @@ -349,7 +346,7 @@ def test_delete_entire_row_with_ts(self): batch = self._make_one(table) batch._delete_range = object() - row_key = "row-key" + row_key = 'row-key' batch._row_map[row_key] = row = _MockRow() self.assertEqual(row.deletes, 0) @@ -372,7 +369,7 @@ def _try_send(self): table = object() batch = 
CallTrySend(table) - row_key = "row-key" + row_key = 'row-key' batch._row_map[row_key] = _MockRow() self.assertEqual(batch._mutation_count, 0) @@ -386,28 +383,26 @@ def test_delete_some_columns(self): table = object() batch = self._make_one(table) - row_key = "row-key" + row_key = 'row-key' batch._row_map[row_key] = row = _MockRow() self.assertEqual(batch._mutation_count, 0) - col1_fam = "cf1" - col2_fam = "cf2" - col2_qual = "col-name" - columns = [col1_fam + ":", col2_fam + ":" + col2_qual] + col1_fam = 'cf1' + col2_fam = 'cf2' + col2_qual = 'col-name' + columns = [col1_fam + ':', col2_fam + ':' + col2_qual] batch.delete(row_key, columns=columns) self.assertEqual(batch._mutation_count, 2) cell_deleted_args = (col2_fam, col2_qual) - cell_deleted_kwargs = {"time_range": None} - self.assertEqual( - row.delete_cell_calls, [(cell_deleted_args, cell_deleted_kwargs)] - ) + cell_deleted_kwargs = {'time_range': None} + self.assertEqual(row.delete_cell_calls, + [(cell_deleted_args, cell_deleted_kwargs)]) fam_deleted_args = (col1_fam,) - fam_deleted_kwargs = {"columns": row.ALL_COLUMNS} - self.assertEqual( - row.delete_cells_calls, [(fam_deleted_args, fam_deleted_kwargs)] - ) + fam_deleted_kwargs = {'columns': row.ALL_COLUMNS} + self.assertEqual(row.delete_cells_calls, + [(fam_deleted_args, fam_deleted_kwargs)]) def test_context_manager(self): klass = self._get_target_class() @@ -436,7 +431,7 @@ class BatchWithSend(_SendMixin, klass): with self.assertRaises(ValueError): with batch: - raise ValueError("Something bad happened") + raise ValueError('Something bad happened') self.assertTrue(batch._send_called) @@ -452,7 +447,7 @@ class BatchWithSend(_SendMixin, klass): with self.assertRaises(ValueError): with batch: - raise ValueError("Something bad happened") + raise ValueError('Something bad happened') self.assertFalse(batch._send_called) @@ -462,25 +457,25 @@ class BatchWithSend(_SendMixin, klass): class Test__get_column_pairs(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.batch import _get_column_pairs - return _get_column_pairs(*args, **kwargs) def test_it(self): - columns = [b"cf1", u"cf2:", "cf3::", "cf3:name1", "cf3:name2"] + columns = [b'cf1', u'cf2:', 'cf3::', 'cf3:name1', 'cf3:name2'] result = self._call_fut(columns) expected_result = [ - ["cf1", None], - ["cf2", None], - ["cf3", ""], - ["cf3", "name1"], - ["cf3", "name2"], + ['cf1', None], + ['cf2', None], + ['cf3', ''], + ['cf3', 'name1'], + ['cf3', 'name2'], ] self.assertEqual(result, expected_result) def test_bad_column(self): - columns = ["a:b:c"] + columns = ['a:b:c'] with self.assertRaises(ValueError): self._call_fut(columns) @@ -495,7 +490,7 @@ def test_bad_columns_var(self): self._call_fut(columns) def test_column_family_with_require_qualifier(self): - columns = ["a:"] + columns = ['a:'] with self.assertRaises(ValueError): self._call_fut(columns, require_qualifier=True) @@ -534,11 +529,13 @@ def delete_cells(self, *args, **kwargs): class _MockTable(object): + def __init__(self, low_level_table): self._low_level_table = low_level_table class _MockLowLevelTable(object): + def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs diff --git a/unit_tests/test_connection.py b/unit_tests/test_connection.py index 9967428..714babd 100644 --- a/unit_tests/test_connection.py +++ b/unit_tests/test_connection.py @@ -19,21 +19,19 @@ class Test__get_instance(unittest.TestCase): + def _call_fut(self): from google.cloud.happybase.connection import _get_instance - return _get_instance() def 
_helper(self, instances=(), failed_locations=()): from functools import partial client_with_instances = partial( - _Client, instances=instances, failed_locations=failed_locations - ) + _Client, instances=instances, failed_locations=failed_locations) - with mock.patch( - "google.cloud.happybase.connection.Client", client_with_instances - ): + with mock.patch('google.cloud.happybase.connection.Client', + client_with_instances): result = self._call_fut() # If we've reached this point, then _call_fut didn't fail, so we know @@ -42,7 +40,7 @@ def _helper(self, instances=(), failed_locations=()): self.assertEqual(result, instance) client = instance.client self.assertEqual(client.args, ()) - expected_kwargs = {"admin": True} + expected_kwargs = {'admin': True} self.assertEqual(client.kwargs, expected_kwargs) def test_default(self): @@ -60,15 +58,16 @@ def test_with_too_many_instances(self): def test_with_failed_locations(self): instance = _Instance() - failed_location = "us-central1-c" + failed_location = 'us-central1-c' with self.assertRaises(ValueError): - self._helper(instances=[instance], failed_locations=[failed_location]) + self._helper(instances=[instance], + failed_locations=[failed_location]) class TestConnection(unittest.TestCase): + def _get_target_class(self): from google.cloud.happybase.connection import Connection - return Connection def _make_one(self, *args, **kwargs): @@ -80,13 +79,13 @@ def test_constructor_defaults(self): self.assertEqual(connection._instance, instance) self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, "_") + self.assertEqual(connection.table_prefix_separator, '_') def test_constructor_no_autoconnect(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, "_") + self.assertEqual(connection.table_prefix_separator, '_') def test_constructor_missing_instance(self): instance = _Instance() @@ -94,35 +93,35 @@ def test_constructor_missing_instance(self): def mock_get_instance(): return instance - with mock.patch( - "google.cloud.happybase.connection._get_instance", mock_get_instance - ): - connection = self._make_one(autoconnect=False, instance=None) + with mock.patch('google.cloud.happybase.connection._get_instance', + mock_get_instance): + connection = self._make_one( + autoconnect=False, instance=None) self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, "_") + self.assertEqual(connection.table_prefix_separator, '_') self.assertEqual(connection._instance, instance) def test_constructor_explicit(self): autoconnect = False - table_prefix = "table-prefix" - table_prefix_separator = "sep" + table_prefix = 'table-prefix' + table_prefix_separator = 'sep' instance = _Instance() connection = self._make_one( autoconnect=autoconnect, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance, - ) + instance=instance) self.assertTrue(connection._instance is instance) self.assertEqual(connection.table_prefix, table_prefix) - self.assertEqual(connection.table_prefix_separator, table_prefix_separator) + self.assertEqual(connection.table_prefix_separator, + table_prefix_separator) def test_constructor_with_unknown_argument(self): instance = _Instance() with self.assertRaises(TypeError): - self._make_one(instance=instance, unknown="foo") + self._make_one(instance=instance, 
unknown='foo') def test_constructor_with_legacy_args(self): import warnings @@ -130,20 +129,16 @@ def test_constructor_with_legacy_args(self): instance = _Instance() with warnings.catch_warnings(record=True) as warned: self._make_one( - instance=instance, - host=object(), - port=object(), - compat=object(), - transport=object(), - protocol=object(), - ) + instance=instance, host=object(), + port=object(), compat=object(), + transport=object(), protocol=object()) self.assertEqual(len(warned), 1) - self.assertIn("host", str(warned[0])) - self.assertIn("port", str(warned[0])) - self.assertIn("compat", str(warned[0])) - self.assertIn("transport", str(warned[0])) - self.assertIn("protocol", str(warned[0])) + self.assertIn('host', str(warned[0])) + self.assertIn('port', str(warned[0])) + self.assertIn('compat', str(warned[0])) + self.assertIn('transport', str(warned[0])) + self.assertIn('protocol', str(warned[0])) def test_constructor_non_string_prefix(self): table_prefix = object() @@ -156,30 +151,30 @@ def test_constructor_non_string_prefix_separator(self): with self.assertRaises(TypeError): self._make_one( - autoconnect=False, table_prefix_separator=table_prefix_separator - ) + autoconnect=False, + table_prefix_separator=table_prefix_separator) def test__table_name_with_prefix_set(self): - table_prefix = "table-prefix" - table_prefix_separator = "<>" + table_prefix = 'table-prefix' + table_prefix_separator = '<>' instance = _Instance() connection = self._make_one( autoconnect=False, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance, - ) + instance=instance) - name = "some-name" + name = 'some-name' prefixed = connection._table_name(name) - self.assertEqual(prefixed, table_prefix + table_prefix_separator + name) + self.assertEqual(prefixed, + table_prefix + table_prefix_separator + name) def test__table_name_with_no_prefix_set(self): instance = _Instance() connection = self._make_one(autoconnect=False, instance=instance) - name = "some-name" + name = 'some-name' prefixed = connection._table_name(name) self.assertEqual(prefixed, name) @@ -189,7 +184,7 @@ def test_table_factory(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = "table-name" + name = 'table-name' table = connection.table(name) self.assertTrue(isinstance(table, Table)) @@ -200,16 +195,14 @@ def _table_factory_prefix_helper(self, use_prefix=True): from google.cloud.happybase.table import Table instance = _Instance() # Avoid implicit environ check. 
- table_prefix = "table-prefix" - table_prefix_separator = "<>" + table_prefix = 'table-prefix' + table_prefix_separator = '<>' connection = self._make_one( - autoconnect=False, - table_prefix=table_prefix, + autoconnect=False, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance, - ) + instance=instance) - name = "table-name" + name = 'table-name' table = connection.table(name, use_prefix=use_prefix) self.assertTrue(isinstance(table, Table)) @@ -229,11 +222,12 @@ def test_table_factory_with_ignored_prefix(self): def test_tables(self): from google.cloud.bigtable.table import Table - table_name1 = "table-name1" - table_name2 = "table-name2" - instance = _Instance( - list_tables_result=[Table(table_name1, None), Table(table_name2, None)] - ) + table_name1 = 'table-name1' + table_name2 = 'table-name2' + instance = _Instance(list_tables_result=[ + Table(table_name1, None), + Table(table_name2, None), + ]) connection = self._make_one(autoconnect=False, instance=instance) result = connection.tables() self.assertEqual(result, [table_name1, table_name2]) @@ -241,21 +235,20 @@ def test_tables(self): def test_tables_with_prefix(self): from google.cloud.bigtable.table import Table - table_prefix = "prefix" - table_prefix_separator = "<>" - unprefixed_table_name1 = "table-name1" - - table_name1 = table_prefix + table_prefix_separator + unprefixed_table_name1 - table_name2 = "table-name2" - instance = _Instance( - list_tables_result=[Table(table_name1, None), Table(table_name2, None)] - ) + table_prefix = 'prefix' + table_prefix_separator = '<>' + unprefixed_table_name1 = 'table-name1' + + table_name1 = (table_prefix + table_prefix_separator + + unprefixed_table_name1) + table_name2 = 'table-name2' + instance = _Instance(list_tables_result=[ + Table(table_name1, None), + Table(table_name2, None), + ]) connection = self._make_one( - autoconnect=False, - instance=instance, - table_prefix=table_prefix, - table_prefix_separator=table_prefix_separator, - ) + autoconnect=False, instance=instance, table_prefix=table_prefix, + table_prefix_separator=table_prefix_separator) result = connection.tables() self.assertEqual(result, [unprefixed_table_name1]) @@ -269,18 +262,18 @@ def mock_parse_family_option(option): called_options.append(option) return mock_gc_rule - name = "table-name" - col_fam1 = "cf1" + name = 'table-name' + col_fam1 = 'cf1' col_fam_option1 = object() - col_fam2 = u"cf2" + col_fam2 = u'cf2' col_fam_option2 = object() - col_fam3 = b"cf3" + col_fam3 = b'cf3' col_fam_option3 = object() families = { col_fam1: col_fam_option1, # A trailing colon is also allowed. - col_fam2 + ":": col_fam_option2, - col_fam3 + b":": col_fam_option3, + col_fam2 + ':': col_fam_option2, + col_fam3 + b':': col_fam_option3, } tables_created = [] @@ -291,7 +284,7 @@ def make_table(*args, **kwargs): return result patch = mock.patch.multiple( - "google.cloud.happybase.connection", + 'google.cloud.happybase.connection', _LowLevelTable=make_table, _parse_family_option=mock_parse_family_option, ) @@ -307,18 +300,17 @@ def make_table(*args, **kwargs): # Check if our mock was called twice, but we don't know the order. 
self.assertEqual( set(called_options), - set([col_fam_option1, col_fam_option2, col_fam_option3]), - ) + set([col_fam_option1, col_fam_option2, col_fam_option3])) col_fam_dict = table_instance.col_fam_dict - expected_cf_list = ["cf1", "cf2", "cf3"] + expected_cf_list = ['cf1', 'cf2', 'cf3'] self.assertEqual(sorted(col_fam_dict), expected_cf_list) def test_create_table_bad_type(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = "table-name" + name = 'table-name' families = None with self.assertRaises(TypeError): connection.create_table(name, families) @@ -327,7 +319,7 @@ def test_create_table_bad_value(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = "table-name" + name = 'table-name' families = {} with self.assertRaises(ValueError): connection.create_table(name, families) @@ -339,14 +331,15 @@ def _create_table_error_helper(self, err_val, err_type): tables_created = [] def make_table(*args, **kwargs): - kwargs["create_error"] = err_val + kwargs['create_error'] = err_val result = _MockLowLevelTable(*args, **kwargs) tables_created.append(result) return result - name = "table-name" - families = {"foo": {}} - with mock.patch("google.cloud.happybase.connection._LowLevelTable", make_table): + name = 'table-name' + families = {'foo': {}} + with mock.patch('google.cloud.happybase.connection._LowLevelTable', + make_table): with self.assertRaises(err_type): connection.create_table(name, families) @@ -358,16 +351,15 @@ def test_create_table_already_exists(self): from grpc.framework.interfaces.face import face from google.cloud.happybase.connection import AlreadyExists - err_val = face.NetworkError( - None, None, interfaces.StatusCode.ALREADY_EXISTS, None - ) + err_val = face.NetworkError(None, None, + interfaces.StatusCode.ALREADY_EXISTS, None) self._create_table_error_helper(err_val, AlreadyExists) def test_create_table_connection_error(self): from grpc.beta import interfaces from grpc.framework.interfaces.face import face - - err_val = face.NetworkError(None, None, interfaces.StatusCode.INTERNAL, None) + err_val = face.NetworkError(None, None, + interfaces.StatusCode.INTERNAL, None) self._create_table_error_helper(err_val, face.NetworkError) def test_create_table_other_error(self): @@ -384,8 +376,9 @@ def make_table(*args, **kwargs): tables_created.append(result) return result - name = "table-name" - with mock.patch("google.cloud.happybase.connection._LowLevelTable", make_table): + name = 'table-name' + with mock.patch('google.cloud.happybase.connection._LowLevelTable', + make_table): connection.delete_table(name, disable=disable) # Just one table would have been created. @@ -414,7 +407,7 @@ def test_enable_table(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = "table-name" + name = 'table-name' with warnings.catch_warnings(record=True) as warned: connection.enable_table(name) @@ -429,7 +422,7 @@ def test_disable_table(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = "table-name" + name = 'table-name' with warnings.catch_warnings(record=True) as warned: connection.disable_table(name) @@ -444,7 +437,7 @@ def test_is_table_enabled(self): instance = _Instance() # Avoid implicit environ check. 
connection = self._make_one(autoconnect=False, instance=instance) - name = "table-name" + name = 'table-name' with warnings.catch_warnings(record=True) as warned: result = connection.is_table_enabled(name) @@ -460,7 +453,7 @@ def test_compact_table(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = "table-name" + name = 'table-name' with warnings.catch_warnings(record=True) as warned: connection.compact_table(name) @@ -470,9 +463,9 @@ def test_compact_table(self): class Test__parse_family_option(unittest.TestCase): + def _call_fut(self, option): from google.cloud.happybase.connection import _parse_family_option - return _parse_family_option(option) def test_dictionary_no_keys(self): @@ -488,19 +481,19 @@ def test_null(self): def test_dictionary_bad_key(self): import warnings - option = {"badkey": None} + option = {'badkey': None} with warnings.catch_warnings(record=True) as warned: result = self._call_fut(option) self.assertEqual(result, None) self.assertEqual(len(warned), 1) - self.assertIn("badkey", str(warned[0])) + self.assertIn('badkey', str(warned[0])) def test_dictionary_versions_key(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule versions = 42 - option = {"max_versions": versions} + option = {'max_versions': versions} result = self._call_fut(option) gc_rule = MaxVersionsGCRule(versions) @@ -512,7 +505,7 @@ def test_dictionary_ttl_key(self): time_to_live = 24 * 60 * 60 max_age = datetime.timedelta(days=1) - option = {"time_to_live": time_to_live} + option = {'time_to_live': time_to_live} result = self._call_fut(option) gc_rule = MaxAgeGCRule(max_age) @@ -526,7 +519,10 @@ def test_dictionary_both_keys(self): versions = 42 time_to_live = 24 * 60 * 60 - option = {"max_versions": versions, "time_to_live": time_to_live} + option = { + 'max_versions': versions, + 'time_to_live': time_to_live, + } result = self._call_fut(option) max_age = datetime.timedelta(days=1) @@ -545,11 +541,12 @@ def test_non_dictionary(self): class _Client(object): + def __init__(self, *args, **kwargs): - self.instances = kwargs.pop("instances", []) + self.instances = kwargs.pop('instances', []) for instance in self.instances: instance.client = self - self.failed_locations = kwargs.pop("failed_locations", []) + self.failed_locations = kwargs.pop('failed_locations', []) self.args = args self.kwargs = kwargs @@ -558,6 +555,7 @@ def list_instances(self): class _Instance(object): + def __init__(self, list_tables_result=()): # Included to support Connection.__del__ self._client = _Client() @@ -568,10 +566,11 @@ def list_tables(self): class _MockLowLevelTable(object): + def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs - self.create_error = kwargs.get("create_error") + self.create_error = kwargs.get('create_error') self.delete_calls = 0 self.create_calls = 0 self.col_fam_dict = {} diff --git a/unit_tests/test_pool.py b/unit_tests/test_pool.py index 1182413..a1ecc66 100644 --- a/unit_tests/test_pool.py +++ b/unit_tests/test_pool.py @@ -19,9 +19,9 @@ class TestConnectionPool(unittest.TestCase): + def _get_target_class(self): from google.cloud.happybase.pool import ConnectionPool - return ConnectionPool def _make_one(self, *args, **kwargs): @@ -49,8 +49,8 @@ def test_constructor_defaults(self): self.assertTrue(connection._instance is instance) def test_constructor_passes_kwargs(self): - table_prefix = "foo" - table_prefix_separator = "<>" + table_prefix = 'foo' + table_prefix_separator = 
'<>' instance = _Instance() # Avoid implicit environ check. size = 1 @@ -58,12 +58,12 @@ def test_constructor_passes_kwargs(self): size, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance, - ) + instance=instance) for connection in pool._queue.queue: self.assertEqual(connection.table_prefix, table_prefix) - self.assertEqual(connection.table_prefix_separator, table_prefix_separator) + self.assertEqual(connection.table_prefix_separator, + table_prefix_separator) def test_constructor_ignores_autoconnect(self): from google.cloud.happybase.connection import Connection @@ -85,7 +85,8 @@ def open(self): # Then make sure autoconnect=True is ignored in a pool. size = 1 - with mock.patch("google.cloud.happybase.pool.Connection", ConnectionWithOpen): + with mock.patch('google.cloud.happybase.pool.Connection', + ConnectionWithOpen): pool = self._make_one(size, autoconnect=True, instance=instance) for connection in pool._queue.queue: @@ -102,7 +103,8 @@ def test_constructor_infers_instance(self): mock_get_instance = mock.create_autospec(_get_instance) mock_get_instance.return_value = instance - with mock.patch("google.cloud.happybase.pool._get_instance", mock_get_instance): + with mock.patch('google.cloud.happybase.pool._get_instance', + mock_get_instance): pool = self._make_one(size) for connection in pool._queue.queue: @@ -129,7 +131,7 @@ def _make_one_with_mock_queue(self, queue_return): # or instances to be created in the constructor. size = -1 instance = object() - with mock.patch("google.cloud.happybase.pool._MIN_POOL_SIZE", size): + with mock.patch('google.cloud.happybase.pool._MIN_POOL_SIZE', size): pool = self._make_one(size, instance=instance) pool._queue = _Queue(queue_return) @@ -163,27 +165,26 @@ def test_connection_is_context_manager(self): pool = self._make_one_with_mock_queue(queue_return) cnxn_context = pool.connection() if six.PY3: # pragma: NO COVER Python 3 - self.assertTrue( - isinstance(cnxn_context, contextlib._GeneratorContextManager) - ) + self.assertTrue(isinstance(cnxn_context, + contextlib._GeneratorContextManager)) else: - self.assertTrue( - isinstance(cnxn_context, contextlib.GeneratorContextManager) - ) + self.assertTrue(isinstance(cnxn_context, + contextlib.GeneratorContextManager)) def test_connection_no_current_cnxn(self): queue_return = _Connection() pool = self._make_one_with_mock_queue(queue_return) timeout = 55 - self.assertFalse(hasattr(pool._thread_connections, "current")) + self.assertFalse(hasattr(pool._thread_connections, 'current')) with pool.connection(timeout=timeout) as connection: self.assertEqual(pool._thread_connections.current, queue_return) self.assertTrue(connection is queue_return) - self.assertFalse(hasattr(pool._thread_connections, "current")) + self.assertFalse(hasattr(pool._thread_connections, 'current')) self.assertEqual(pool._queue._get_calls, [(True, timeout)]) - self.assertEqual(pool._queue._put_calls, [(queue_return, None, None)]) + self.assertEqual(pool._queue._put_calls, + [(queue_return, None, None)]) def test_connection_with_current_cnxn(self): current_cnxn = _Connection() @@ -205,17 +206,20 @@ class _Client(object): class _Connection(object): + def open(self): pass class _Instance(object): + def __init__(self): # Included to support Connection.__del__ self._client = _Client() class _Queue(object): + def __init__(self, result=None): self.result = result self._get_calls = [] @@ -225,7 +229,6 @@ def get(self, block=None, timeout=None): self._get_calls.append((block, timeout)) if self.result is 
None: import six - raise six.moves.queue.Empty else: return self.result diff --git a/unit_tests/test_table.py b/unit_tests/test_table.py index 6783e08..ea4ff9e 100644 --- a/unit_tests/test_table.py +++ b/unit_tests/test_table.py @@ -18,9 +18,9 @@ class Test_make_row(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import make_row - return make_row(*args, **kwargs) def test_it(self): @@ -29,9 +29,9 @@ def test_it(self): class Test_make_ordered_row(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import make_ordered_row - return make_ordered_row(*args, **kwargs) def test_it(self): @@ -40,16 +40,16 @@ def test_it(self): class TestTable(unittest.TestCase): + def _get_target_class(self): from google.cloud.happybase.table import Table - return Table def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): - name = "table-name" + name = 'table-name' instance = object() connection = _Connection(instance) tables_constructed = [] @@ -59,9 +59,8 @@ def make_low_level_table(*args, **kwargs): tables_constructed.append(result) return result - with mock.patch( - "google.cloud.happybase.table._LowLevelTable", make_low_level_table - ): + with mock.patch('google.cloud.happybase.table._LowLevelTable', + make_low_level_table): table = self._make_one(name, connection) self.assertEqual(table.name, name) self.assertEqual(table.connection, connection) @@ -72,7 +71,7 @@ def make_low_level_table(*args, **kwargs): self.assertEqual(table_instance.kwargs, {}) def test_constructor_null_connection(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) self.assertEqual(table.name, name) @@ -80,13 +79,13 @@ def test_constructor_null_connection(self): self.assertEqual(table._low_level_table, None) def test_families(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() # Mock the column families to be returned. 
- col_fam_name = "fam" + col_fam_name = 'fam' gc_rule = object() col_fam = _MockLowLevelColumnFamily(col_fam_name, gc_rule=gc_rule) col_fams = {col_fam_name: col_fam} @@ -99,9 +98,8 @@ def mock_gc_rule_to_dict(gc_rule): to_dict_calls.append(gc_rule) return to_dict_result - with mock.patch( - "google.cloud.happybase.table._gc_rule_to_dict", mock_gc_rule_to_dict - ): + with mock.patch('google.cloud.happybase.table._gc_rule_to_dict', + mock_gc_rule_to_dict): result = table.families() self.assertEqual(result, {col_fam_name: to_dict_result}) @@ -109,12 +107,12 @@ def mock_gc_rule_to_dict(gc_rule): self.assertEqual(to_dict_calls, [gc_rule]) def test___repr__(self): - name = "table-name" + name = 'table-name' table = self._make_one(name, None) - self.assertEqual(repr(table), "") + self.assertEqual(repr(table), '') def test_regions(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) @@ -122,7 +120,7 @@ def test_regions(self): table.regions() def test_row_empty_row(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -136,28 +134,30 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - row_key = "row-key" + row_key = 'row-key' timestamp = object() - with mock.patch( - "google.cloud.happybase.table._filter_chain_helper", - mock_filter_chain_helper, - ): + with mock.patch('google.cloud.happybase.table._filter_chain_helper', + mock_filter_chain_helper): result = table.row(row_key, timestamp=timestamp) # read_row_result == None --> No results. self.assertEqual(result, {}) read_row_args = (row_key,) - read_row_kwargs = {"filter_": fake_filter} - self.assertEqual( - table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] - ) + read_row_kwargs = {'filter_': fake_filter} + self.assertEqual(table._low_level_table.read_row_calls, [ + (read_row_args, read_row_kwargs), + ]) - expected_kwargs = {"filters": [], "versions": 1, "timestamp": timestamp} + expected_kwargs = { + 'filters': [], + 'versions': 1, + 'timestamp': timestamp, + } self.assertEqual(mock_filters, [expected_kwargs]) def test_row_with_columns(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -178,10 +178,10 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - row_key = "row-key" + row_key = 'row-key' columns = object() patch = mock.patch.multiple( - "google.cloud.happybase.table", + 'google.cloud.happybase.table', _filter_chain_helper=mock_filter_chain_helper, _columns_filter_helper=mock_columns_filter_helper, ) @@ -192,24 +192,24 @@ def mock_filter_chain_helper(**kwargs): self.assertEqual(result, {}) read_row_args = (row_key,) - read_row_kwargs = {"filter_": fake_filter} - self.assertEqual( - table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] - ) + read_row_kwargs = {'filter_': fake_filter} + self.assertEqual(table._low_level_table.read_row_calls, [ + (read_row_args, read_row_kwargs), + ]) self.assertEqual(mock_columns, [(columns,)]) expected_kwargs = { - "filters": [fake_col_filter], - "versions": 1, - "timestamp": None, + 'filters': [fake_col_filter], + 'versions': 1, + 'timestamp': None, } self.assertEqual(mock_filters, [expected_kwargs]) def test_row_with_results(self): from google.cloud.bigtable.row_data import PartialRowData - row_key = "row-key" - name = 
"table-name" + row_key = 'row-key' + name = 'table-name' connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -231,13 +231,13 @@ def mock_cells_to_pairs(*args, **kwargs): mock_cells.append((args, kwargs)) return [fake_pair] - col_fam = u"cf1" - qual = b"qual" + col_fam = u'cf1' + qual = b'qual' fake_cells = object() partial_row._cells = {col_fam: {qual: fake_cells}} include_timestamp = object() patch = mock.patch.multiple( - "google.cloud.happybase.table", + 'google.cloud.happybase.table', _filter_chain_helper=mock_filter_chain_helper, _cells_to_pairs=mock_cells_to_pairs, ) @@ -245,22 +245,27 @@ def mock_cells_to_pairs(*args, **kwargs): result = table.row(row_key, include_timestamp=include_timestamp) # The results come from _cells_to_pairs. - expected_result = {col_fam.encode("ascii") + b":" + qual: fake_pair} + expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair} self.assertEqual(result, expected_result) read_row_args = (row_key,) - read_row_kwargs = {"filter_": fake_filter} - self.assertEqual( - table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] - ) + read_row_kwargs = {'filter_': fake_filter} + self.assertEqual(table._low_level_table.read_row_calls, [ + (read_row_args, read_row_kwargs), + ]) - expected_kwargs = {"filters": [], "versions": 1, "timestamp": None} + expected_kwargs = { + 'filters': [], + 'versions': 1, + 'timestamp': None, + } self.assertEqual(mock_filters, [expected_kwargs]) - to_pairs_kwargs = {"include_timestamp": include_timestamp} - self.assertEqual(mock_cells, [((fake_cells,), to_pairs_kwargs)]) + to_pairs_kwargs = {'include_timestamp': include_timestamp} + self.assertEqual(mock_cells, + [((fake_cells,), to_pairs_kwargs)]) def test_rows_empty_row(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) @@ -268,7 +273,7 @@ def test_rows_empty_row(self): self.assertEqual(result, []) def test_rows_with_columns(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -285,7 +290,8 @@ def mock_columns_filter_helper(*args): fake_row_set = object() - def mock_get_row_set_from_rows(*args): # pylint: disable=unused-argument + def mock_get_row_set_from_rows( + *args): # pylint: disable=unused-argument return fake_row_set fake_filter = object() @@ -295,10 +301,10 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - rows = ["row-key"] + rows = ['row-key'] columns = object() patch = mock.patch.multiple( - "google.cloud.happybase.table", + 'google.cloud.happybase.table', _filter_chain_helper=mock_filter_chain_helper, _columns_filter_helper=mock_columns_filter_helper, _get_row_set_from_rows=mock_get_row_set_from_rows, @@ -310,27 +316,28 @@ def mock_filter_chain_helper(**kwargs): self.assertEqual(result, []) read_rows_args = () - read_rows_kwargs = {"row_set": fake_row_set, "filter_": fake_filter} - self.assertEqual( - table._low_level_table.read_rows_calls, [(read_rows_args, read_rows_kwargs)] - ) + read_rows_kwargs = {'row_set': fake_row_set, + 'filter_': fake_filter} + self.assertEqual(table._low_level_table.read_rows_calls, [ + (read_rows_args, read_rows_kwargs), + ]) self.assertEqual(mock_cols, [(columns,)]) expected_kwargs = { - "filters": [fake_col_filter], - "versions": 1, - "timestamp": None, + 'filters': [fake_col_filter], + 'versions': 1, + 'timestamp': None, } 
self.assertEqual(mock_filters, [expected_kwargs]) def test_rows_with_results(self): from google.cloud.bigtable.row_data import PartialRowData - row_key1 = "row-key1" - row_key2 = "row-key2" + row_key1 = 'row-key1' + row_key2 = 'row-key2' rows = [row_key1, row_key2] - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -343,7 +350,8 @@ def test_rows_with_results(self): # Set-up mocks. fake_row_set = object() - def mock_get_row_set_from_rows(*args): # pylint: disable=unused-argument + def mock_get_row_set_from_rows( + *args): # pylint: disable=unused-argument return fake_row_set fake_filter = object() @@ -360,13 +368,13 @@ def mock_cells_to_pairs(*args, **kwargs): mock_cells.append((args, kwargs)) return [fake_pair] - col_fam = u"cf1" - qual = b"qual" + col_fam = u'cf1' + qual = b'qual' fake_cells = object() row1._cells = {col_fam: {qual: fake_cells}} include_timestamp = object() patch = mock.patch.multiple( - "google.cloud.happybase.table", + 'google.cloud.happybase.table', _filter_chain_helper=mock_filter_chain_helper, _get_row_set_from_rows=mock_get_row_set_from_rows, _cells_to_pairs=mock_cells_to_pairs, @@ -375,22 +383,28 @@ def mock_cells_to_pairs(*args, **kwargs): result = table.rows(rows, include_timestamp=include_timestamp) # read_rows_result == PartialRowsData with row_key1 - expected_result = {col_fam.encode("ascii") + b":" + qual: fake_pair} + expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair} self.assertEqual(result, [(row_key1, expected_result)]) read_rows_args = () - read_rows_kwargs = {"row_set": fake_row_set, "filter_": fake_filter} - self.assertEqual( - table._low_level_table.read_rows_calls, [(read_rows_args, read_rows_kwargs)] - ) + read_rows_kwargs = {'row_set': fake_row_set, + 'filter_': fake_filter} + self.assertEqual(table._low_level_table.read_rows_calls, [ + (read_rows_args, read_rows_kwargs), + ]) - expected_kwargs = {"filters": [], "versions": 1, "timestamp": None} + expected_kwargs = { + 'filters': [], + 'versions': 1, + 'timestamp': None, + } self.assertEqual(mock_filters, [expected_kwargs]) - to_pairs_kwargs = {"include_timestamp": include_timestamp} - self.assertEqual(mock_cells, [((fake_cells,), to_pairs_kwargs)]) + to_pairs_kwargs = {'include_timestamp': include_timestamp} + self.assertEqual(mock_cells, + [((fake_cells,), to_pairs_kwargs)]) def test_cells_empty_row(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -404,31 +418,33 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - row_key = "row-key" - column = "fam:col1" - with mock.patch( - "google.cloud.happybase.table._filter_chain_helper", - mock_filter_chain_helper, - ): + row_key = 'row-key' + column = 'fam:col1' + with mock.patch('google.cloud.happybase.table._filter_chain_helper', + mock_filter_chain_helper): result = table.cells(row_key, column) # read_row_result == None --> No results. 
         self.assertEqual(result, [])
 
         read_row_args = (row_key,)
-        read_row_kwargs = {"filter_": fake_filter}
-        self.assertEqual(
-            table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)]
-        )
+        read_row_kwargs = {'filter_': fake_filter}
+        self.assertEqual(table._low_level_table.read_row_calls, [
+            (read_row_args, read_row_kwargs),
+        ])
 
-        expected_kwargs = {"column": column, "versions": None, "timestamp": None}
+        expected_kwargs = {
+            'column': column,
+            'versions': None,
+            'timestamp': None,
+        }
         self.assertEqual(mock_filters, [expected_kwargs])
 
     def test_cells_with_results(self):
         from google.cloud.bigtable.row_data import PartialRowData
 
-        row_key = "row-key"
-        name = "table-name"
+        row_key = 'row-key'
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
         table._low_level_table = _MockLowLevelTable()
@@ -455,41 +471,42 @@ def mock_cells_to_pairs(*args, **kwargs):
             mock_cells.append((args, kwargs))
             return fake_result
 
-        col_fam = "cf1"
-        qual = "qual"
+        col_fam = 'cf1'
+        qual = 'qual'
         fake_cells = object()
-        partial_row._cells = {col_fam: {qual.encode("utf-8"): fake_cells}}
-        column = (col_fam + ":" + qual).encode("utf-8")
+        partial_row._cells = {col_fam: {qual.encode('utf-8'): fake_cells}}
+        column = (col_fam + ':' + qual).encode('utf-8')
         patch = mock.patch.multiple(
-            "google.cloud.happybase.table",
+            'google.cloud.happybase.table',
             _filter_chain_helper=mock_filter_chain_helper,
             _cells_to_pairs=mock_cells_to_pairs,
         )
         with patch:
-            result = table.cells(
-                row_key,
-                column,
-                versions=versions,
-                timestamp=timestamp,
-                include_timestamp=include_timestamp,
-            )
+            result = table.cells(row_key, column, versions=versions,
                                 timestamp=timestamp,
                                 include_timestamp=include_timestamp)
 
         self.assertEqual(result, fake_result)
 
         read_row_args = (row_key,)
-        read_row_kwargs = {"filter_": fake_filter}
-        self.assertEqual(
-            table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)]
-        )
-
-        filter_kwargs = {"column": column, "versions": versions, "timestamp": timestamp}
+        read_row_kwargs = {'filter_': fake_filter}
+        self.assertEqual(table._low_level_table.read_row_calls, [
+            (read_row_args, read_row_kwargs),
+        ])
+
+        filter_kwargs = {
+            'column': column,
+            'versions': versions,
+            'timestamp': timestamp,
+        }
         self.assertEqual(mock_filters, [filter_kwargs])
 
-        to_pairs_kwargs = {"include_timestamp": include_timestamp}
-        self.assertEqual(mock_cells, [((fake_cells,), to_pairs_kwargs)])
+        to_pairs_kwargs = {'include_timestamp': include_timestamp}
+        self.assertEqual(mock_cells,
+                         [((fake_cells,), to_pairs_kwargs)])
 
     def test_scan_with_batch_size(self):
         import warnings
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
         # Use unknown to force a TypeError, so we don't need to
@@ -499,12 +516,12 @@ def test_scan_with_batch_size(self):
             list(table.scan(batch_size=object(), unknown=None))
 
         self.assertEqual(len(warned), 1)
-        self.assertIn("batch_size", str(warned[0]))
+        self.assertIn('batch_size', str(warned[0]))
 
     def test_scan_with_scan_batching(self):
         import warnings
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
         # Use unknown to force a TypeError, so we don't need to
@@ -514,12 +531,12 @@ def test_scan_with_scan_batching(self):
             list(table.scan(scan_batching=object(), unknown=None))
 
         self.assertEqual(len(warned), 1)
-        self.assertIn("scan_batching", str(warned[0]))
+        self.assertIn('scan_batching', str(warned[0]))
 
     def test_scan_with_sorted_columns(self):
         import warnings
 
name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) # Use unknown to force a TypeError, so we don't need to @@ -529,45 +546,37 @@ def test_scan_with_sorted_columns(self): list(table.scan(sorted_columns=object(), unknown=None)) self.assertEqual(len(warned), 1) - self.assertIn("sorted_columns", str(warned[0])) + self.assertIn('sorted_columns', str(warned[0])) def test_scan_with_invalid_limit(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) with self.assertRaises(ValueError): list(table.scan(limit=-10)) def test_scan_with_row_prefix_and_row_start(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) with self.assertRaises(ValueError): - list(table.scan(row_prefix="a", row_stop="abc")) + list(table.scan(row_prefix='a', row_stop='abc')) def test_scan_with_string_filter(self): - name = "table-name" + name = 'table-name' connection = None table = self._make_one(name, connection) with self.assertRaises(TypeError): - list(table.scan(filter="some-string")) - - def _scan_test_helper( - self, - row_limits=(None, None), - row_prefix=None, - columns=None, - filter_=None, - timestamp=None, - include_timestamp=False, - limit=None, - rr_result=None, - expected_result=None, - ): + list(table.scan(filter='some-string')) + + def _scan_test_helper(self, row_limits=(None, None), row_prefix=None, + columns=None, filter_=None, timestamp=None, + include_timestamp=False, limit=None, rr_result=None, + expected_result=None): import types from google.cloud.happybase.table import _string_successor - name = "table-name" + name = 'table-name' row_start, row_stop = row_limits connection = None table = self._make_one(name, connection) @@ -597,22 +606,17 @@ def mock_get_row_set_object(*args): # pylint: disable=unused-argument return fake_row_set patch = mock.patch.multiple( - "google.cloud.happybase.table", + 'google.cloud.happybase.table', _filter_chain_helper=mock_filter_chain_helper, _columns_filter_helper=mock_columns_filter_helper, _get_row_set_object=mock_get_row_set_object, ) with patch: - result = table.scan( - row_start=row_start, - row_stop=row_stop, - row_prefix=row_prefix, - columns=columns, - filter=filter_, - timestamp=timestamp, - include_timestamp=include_timestamp, - limit=limit, - ) + result = table.scan(row_start=row_start, row_stop=row_stop, + row_prefix=row_prefix, columns=columns, + filter=filter_, timestamp=timestamp, + include_timestamp=include_timestamp, + limit=limit) self.assertTrue(isinstance(result, types.GeneratorType)) # Need to consume the result while the monkey patch is applied. # read_rows_result == Empty PartialRowsData --> No results. 
@@ -625,14 +629,14 @@ def mock_get_row_set_object(*args):  # pylint: disable=unused-argument
             row_stop = _string_successor(row_prefix)
 
         read_rows_kwargs = {
-            "row_set": fake_row_set,
-            "filter_": fake_filter,
-            "limit": limit,
+            'row_set': fake_row_set,
+            'filter_': fake_filter,
+            'limit': limit,
         }
-        self.assertEqual(
-            table._low_level_table.read_rows_calls, [(read_rows_args, read_rows_kwargs)]
-        )
+        self.assertEqual(table._low_level_table.read_rows_calls, [
+            (read_rows_args, read_rows_kwargs),
+        ])
 
         if columns is not None:
             self.assertEqual(mock_columns, [(columns,)])
@@ -644,7 +648,11 @@ def mock_get_row_set_object(*args):  # pylint: disable=unused-argument
             filters.append(filter_)
         if columns:
             filters.append(fake_col_filter)
-        expected_kwargs = {"filters": filters, "versions": 1, "timestamp": timestamp}
+        expected_kwargs = {
+            'filters': filters,
+            'versions': 1,
+            'timestamp': timestamp,
+        }
         self.assertEqual(mock_filters, [expected_kwargs])
 
     def test_scan_with_columns(self):
@@ -652,13 +660,13 @@ def test_scan_with_columns(self):
         self._scan_test_helper(columns=columns)
 
     def test_scan_with_row_start_and_stop(self):
-        row_start = "bar"
-        row_stop = "foo"
+        row_start = 'bar'
+        row_stop = 'foo'
         row_limits = (row_start, row_stop)
         self._scan_test_helper(row_limits=row_limits)
 
     def test_scan_with_row_prefix(self):
-        row_prefix = "row-prefi"
+        row_prefix = 'row-prefi'
         self._scan_test_helper(row_prefix=row_prefix)
 
     def test_scan_with_filter(self):
@@ -673,22 +681,20 @@ def test_scan_with_no_results(self):
 
     def test_scan_with_results(self):
         from google.cloud.bigtable.row_data import PartialRowData
 
-        row_key1 = "row-key1"
+        row_key1 = 'row-key1'
         row1 = PartialRowData(row_key1)
         rr_result = _MockPartialRowsData(rows={row_key1: row1}, iterations=1)
 
         include_timestamp = object()
         expected_result = [(row_key1, {})]
-        self._scan_test_helper(
-            include_timestamp=include_timestamp,
-            rr_result=rr_result,
-            expected_result=expected_result,
-        )
+        self._scan_test_helper(include_timestamp=include_timestamp,
                               rr_result=rr_result,
                               expected_result=expected_result)
 
     def test_put(self):
         from google.cloud.happybase.table import _WAL_SENTINEL
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
         batches_created = []
@@ -698,10 +704,11 @@ def make_batch(*args, **kwargs):
             batches_created.append(result)
             return result
 
-        row = "row-key"
-        data = {"fam:col": "foo"}
+        row = 'row-key'
+        data = {'fam:col': 'foo'}
         timestamp = None
-        with mock.patch("google.cloud.happybase.table.Batch", make_batch):
+        with mock.patch('google.cloud.happybase.table.Batch',
+                        make_batch):
             result = table.put(row, data, timestamp=timestamp)
 
         # There is no return value.
@@ -712,10 +719,10 @@ def make_batch(*args, **kwargs):
         self.assertTrue(isinstance(batch, _MockBatch))
         self.assertEqual(batch.args, (table,))
         expected_kwargs = {
-            "timestamp": timestamp,
-            "batch_size": None,
-            "transaction": False,
-            "wal": _WAL_SENTINEL,
+            'timestamp': timestamp,
+            'batch_size': None,
+            'transaction': False,
+            'wal': _WAL_SENTINEL,
         }
         self.assertEqual(batch.kwargs, expected_kwargs)
         # Make sure it was a successful context manager
@@ -726,7 +733,7 @@ def make_batch(*args, **kwargs):
     def test_delete(self):
         from google.cloud.happybase.table import _WAL_SENTINEL
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
         batches_created = []
@@ -736,10 +743,11 @@ def make_batch(*args, **kwargs):
             batches_created.append(result)
             return result
 
-        row = "row-key"
-        columns = ["fam:col1", "fam:col2"]
+        row = 'row-key'
+        columns = ['fam:col1', 'fam:col2']
         timestamp = None
-        with mock.patch("google.cloud.happybase.table.Batch", make_batch):
+        with mock.patch('google.cloud.happybase.table.Batch',
+                        make_batch):
             result = table.delete(row, columns=columns, timestamp=timestamp)
 
         # There is no return value.
@@ -750,10 +758,10 @@ def make_batch(*args, **kwargs):
         self.assertTrue(isinstance(batch, _MockBatch))
         self.assertEqual(batch.args, (table,))
         expected_kwargs = {
-            "timestamp": timestamp,
-            "batch_size": None,
-            "transaction": False,
-            "wal": _WAL_SENTINEL,
+            'timestamp': timestamp,
+            'batch_size': None,
+            'transaction': False,
+            'wal': _WAL_SENTINEL,
         }
         self.assertEqual(batch.kwargs, expected_kwargs)
         # Make sure it was a successful context manager
@@ -762,7 +770,7 @@ def make_batch(*args, **kwargs):
         self.assertEqual(batch.delete_args, [(row, columns)])
 
     def test_batch(self):
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
 
@@ -771,21 +779,18 @@ def test_batch(self):
         transaction = False  # Must be False when batch_size is non-null
         wal = object()
 
-        with mock.patch("google.cloud.happybase.table.Batch", _MockBatch):
-            result = table.batch(
-                timestamp=timestamp,
-                batch_size=batch_size,
-                transaction=transaction,
-                wal=wal,
-            )
+        with mock.patch('google.cloud.happybase.table.Batch',
+                        _MockBatch):
+            result = table.batch(timestamp=timestamp, batch_size=batch_size,
                                 transaction=transaction, wal=wal)
 
         self.assertTrue(isinstance(result, _MockBatch))
         self.assertEqual(result.args, (table,))
         expected_kwargs = {
-            "timestamp": timestamp,
-            "batch_size": batch_size,
-            "transaction": transaction,
-            "wal": wal,
+            'timestamp': timestamp,
+            'batch_size': batch_size,
+            'transaction': transaction,
+            'wal': wal,
         }
         self.assertEqual(result.kwargs, expected_kwargs)
 
@@ -803,12 +808,12 @@ def counter_inc(self, row, column, value=1):
                 self.value += value
                 return self.value
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = TableWithInc(name, connection)
 
-        row = "row-key"
-        column = "fam:col1"
+        row = 'row-key'
+        column = 'fam:col1'
         self.assertEqual(TableWithInc.incremented, [])
         result = table.counter_get(row, column)
         self.assertEqual(result, counter_value)
@@ -828,12 +833,12 @@ def counter_inc(self, row, column, value=1):
                 self.value += value
                 return self.value
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = TableWithInc(name, connection)
 
-        row = "row-key"
-        column = "fam:col1"
+        row = 'row-key'
+        column = 'fam:col1'
         dec_value = 987
         self.assertEqual(TableWithInc.incremented, [])
         result = table.counter_dec(row, column, value=dec_value)
@@ -843,14 +848,13 @@ def counter_inc(self, row, column, value=1):
     def _counter_inc_helper(self, row, column, value, commit_result):
         import six
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
         # Mock the return values.
         table._low_level_table = _MockLowLevelTable()
         table._low_level_table.row_values[row] = row_obj = _MockLowLevelRow(
-            row, commit_result=commit_result
-        )
+            row, commit_result=commit_result)
 
         self.assertFalse(row_obj._append)
         result = table.counter_inc(row, column, value=value)
@@ -862,14 +866,15 @@ def _counter_inc_helper(self, row, column, value, commit_result):
         # Check the row values returned.
         row_obj = table._low_level_table.row_values[row]
         if isinstance(column, six.binary_type):
-            column = column.decode("utf-8")
-        self.assertEqual(row_obj.counts, {tuple(column.split(":")): incremented_value})
+            column = column.decode('utf-8')
+        self.assertEqual(row_obj.counts,
                         {tuple(column.split(':')): incremented_value})
 
     def test_counter_set(self):
         import struct
         from google.cloud.happybase.table import _WAL_SENTINEL
 
-        name = "table-name"
+        name = 'table-name'
         connection = None
         table = self._make_one(name, connection)
         batches_created = []
@@ -879,10 +884,11 @@ def make_batch(*args, **kwargs):
             batches_created.append(result)
             return result
 
-        row = "row-key"
-        column = "fam:col1"
+        row = 'row-key'
+        column = 'fam:col1'
         value = 42
-        with mock.patch("google.cloud.happybase.table.Batch", make_batch):
+        with mock.patch('google.cloud.happybase.table.Batch',
+                        make_batch):
             result = table.counter_set(row, column, value=value)
 
         # There is no return value.
@@ -893,81 +899,87 @@ def make_batch(*args, **kwargs):
         self.assertTrue(isinstance(batch, _MockBatch))
         self.assertEqual(batch.args, (table,))
         expected_kwargs = {
-            "timestamp": None,
-            "batch_size": None,
-            "transaction": False,
-            "wal": _WAL_SENTINEL,
+            'timestamp': None,
+            'batch_size': None,
+            'transaction': False,
+            'wal': _WAL_SENTINEL,
        }
         self.assertEqual(batch.kwargs, expected_kwargs)
         # Make sure it was a successful context manager
         self.assertEqual(batch.exit_vals, [(None, None, None)])
-        data = {column: struct.Struct(">q").pack(value)}
+        data = {column: struct.Struct('>q').pack(value)}
         self.assertEqual(batch.put_args, [(row, data)])
         self.assertEqual(batch.delete_args, [])
 
     def test_counter_inc(self):
         import struct
 
-        row = "row-key"
-        col_fam = u"fam"
-        col_qual = u"col1"
-        column = col_fam + u":" + col_qual
+        row = 'row-key'
+        col_fam = u'fam'
+        col_qual = u'col1'
+        column = col_fam + u':' + col_qual
         value = 42
-        packed_value = struct.pack(">q", value)
+        packed_value = struct.pack('>q', value)
         fake_timestamp = None
-        commit_result = {col_fam: {col_qual.encode(): [(packed_value, fake_timestamp)]}}
+        commit_result = {
+            col_fam: {
+                col_qual.encode(): [(packed_value, fake_timestamp)],
+            }
+        }
         self._counter_inc_helper(row, column, value, commit_result)
 
     def test_counter_inc_column_bytes(self):
         import struct
 
-        row = "row-key"
-        col_fam = b"fam"
-        col_qual = b"col1"
-        column = col_fam + b":" + col_qual
+        row = 'row-key'
+        col_fam = b'fam'
+        col_qual = b'col1'
+        column = col_fam + b':' + col_qual
         value = 42
-        packed_value = struct.pack(">q", value)
+        packed_value = struct.pack('>q', value)
         fake_timestamp = None
         commit_result = {
-            col_fam.decode("utf-8"): {col_qual: [(packed_value, fake_timestamp)]}
+            col_fam.decode('utf-8'): {
+                col_qual: [(packed_value, fake_timestamp)],
+            }
        }
         self._counter_inc_helper(row, column, value, commit_result)
 
     def test_counter_inc_bad_result(self):
-        row = "row-key"
-        col_fam = "fam"
-        col_qual = "col1"
col_fam + ":" + col_qual + row = 'row-key' + col_fam = 'fam' + col_qual = 'col1' + column = col_fam + ':' + col_qual value = 42 commit_result = None with self.assertRaises(TypeError): self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_result_key_error(self): - row = "row-key" - col_fam = "fam" - col_qual = "col1" - column = col_fam + ":" + col_qual + row = 'row-key' + col_fam = 'fam' + col_qual = 'col1' + column = col_fam + ':' + col_qual value = 42 commit_result = {} with self.assertRaises(KeyError): self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_result_nested_key_error(self): - row = "row-key" - col_fam = "fam" - col_qual = "col1" - column = col_fam + ":" + col_qual + row = 'row-key' + col_fam = 'fam' + col_qual = 'col1' + column = col_fam + ':' + col_qual value = 42 commit_result = {col_fam: {}} with self.assertRaises(KeyError): self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_result_non_unique_cell(self): - row = "row-key" - col_fam = "fam" - col_qual = "col1" - column = col_fam + ":" + col_qual + row = 'row-key' + col_fam = 'fam' + col_qual = 'col1' + column = col_fam + ':' + col_qual value = 42 fake_timestamp = None packed_value = None @@ -976,7 +988,7 @@ def test_counter_inc_result_non_unique_cell(self): col_qual.encode(): [ (packed_value, fake_timestamp), (packed_value, fake_timestamp), - ] + ], } } with self.assertRaises(ValueError): @@ -984,9 +996,9 @@ def test_counter_inc_result_non_unique_cell(self): class Test__gc_rule_to_dict(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _gc_rule_to_dict - return _gc_rule_to_dict(*args, **kwargs) def test_with_null(self): @@ -1000,7 +1012,7 @@ def test_with_max_versions(self): max_versions = 2 gc_rule = MaxVersionsGCRule(max_versions) result = self._call_fut(gc_rule) - expected_result = {"max_versions": max_versions} + expected_result = {'max_versions': max_versions} self.assertEqual(result, expected_result) def test_with_max_age(self): @@ -1011,7 +1023,7 @@ def test_with_max_age(self): max_age = datetime.timedelta(seconds=time_to_live) gc_rule = MaxAgeGCRule(max_age) result = self._call_fut(gc_rule) - expected_result = {"time_to_live": time_to_live} + expected_result = {'time_to_live': time_to_live} self.assertEqual(result, expected_result) def test_with_non_gc_rule(self): @@ -1056,7 +1068,10 @@ def test_with_intersection_two_rules(self): rule2 = MaxVersionsGCRule(max_versions) gc_rule = GCRuleIntersection(rules=[rule1, rule2]) result = self._call_fut(gc_rule) - expected_result = {"max_versions": max_versions, "time_to_live": time_to_live} + expected_result = { + 'max_versions': max_versions, + 'time_to_live': time_to_live, + } self.assertEqual(result, expected_result) def test_with_intersection_two_nested_rules(self): @@ -1070,32 +1085,32 @@ def test_with_intersection_two_nested_rules(self): class Test__string_successor(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _string_successor - return _string_successor(*args, **kwargs) def test_with_alphanumeric(self): - self.assertEqual(self._call_fut(b"boa"), b"bob") - self.assertEqual(self._call_fut(b"abc1"), b"abc2") + self.assertEqual(self._call_fut(b'boa'), b'bob') + self.assertEqual(self._call_fut(b'abc1'), b'abc2') def test_with_last_byte(self): - self.assertEqual(self._call_fut(b"boa\xff"), b"bob") + self.assertEqual(self._call_fut(b'boa\xff'), b'bob') def test_with_empty_string(self): - 
self.assertEqual(self._call_fut(b""), b"") + self.assertEqual(self._call_fut(b''), b'') def test_with_all_last_bytes(self): - self.assertEqual(self._call_fut(b"\xff\xff\xff"), b"") + self.assertEqual(self._call_fut(b'\xff\xff\xff'), b'') def test_with_unicode_input(self): - self.assertEqual(self._call_fut(u"boa"), b"bob") + self.assertEqual(self._call_fut(u'boa'), b'bob') class Test__convert_to_time_range(unittest.TestCase): + def _call_fut(self, timestamp=None): from google.cloud.happybase.table import _convert_to_time_range - return _convert_to_time_range(timestamp=timestamp) def test_null(self): @@ -1121,17 +1136,17 @@ def test_success(self): class Test__cells_to_pairs(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _cells_to_pairs - return _cells_to_pairs(*args, **kwargs) def test_without_timestamp(self): from google.cloud.bigtable.row_data import Cell - value1 = "foo" + value1 = 'foo' cell1 = Cell(value=value1, timestamp_micros=None) - value2 = "bar" + value2 = 'bar' cell2 = Cell(value=value2, timestamp_micros=None) result = self._call_fut([cell1, cell2]) @@ -1140,70 +1155,73 @@ def test_without_timestamp(self): def test_with_timestamp(self): from google.cloud.bigtable.row_data import Cell - value1 = "foo" + value1 = 'foo' ts1_millis = 1221934570148 ts1 = ts1_millis * 1000 cell1 = Cell(value=value1, timestamp_micros=ts1) - value2 = "bar" + value2 = 'bar' ts2_millis = 1221955575548 ts2 = ts2_millis * 1000 cell2 = Cell(value=value2, timestamp_micros=ts2) result = self._call_fut([cell1, cell2], include_timestamp=True) - self.assertEqual(result, [(value1, ts1_millis), (value2, ts2_millis)]) + self.assertEqual(result, + [(value1, ts1_millis), (value2, ts2_millis)]) class Test__partial_row_to_dict(unittest.TestCase): + def _call_fut(self, partial_row_data, include_timestamp=False): from google.cloud.happybase.table import _partial_row_to_dict - - return _partial_row_to_dict( - partial_row_data, include_timestamp=include_timestamp - ) + return _partial_row_to_dict(partial_row_data, + include_timestamp=include_timestamp) def test_without_timestamp(self): from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData - row_data = PartialRowData(b"row-key") - val1 = b"hi-im-bytes" - val2 = b"bi-im-hytes" - row_data._cells[u"fam1"] = { - b"col1": [Cell(val1, None)], - b"col2": [Cell(val2, None)], + row_data = PartialRowData(b'row-key') + val1 = b'hi-im-bytes' + val2 = b'bi-im-hytes' + row_data._cells[u'fam1'] = { + b'col1': [Cell(val1, None)], + b'col2': [Cell(val2, None)], } result = self._call_fut(row_data) - expected_result = {b"fam1:col1": val1, b"fam1:col2": val2} + expected_result = { + b'fam1:col1': val1, + b'fam1:col2': val2, + } self.assertEqual(result, expected_result) def test_with_timestamp(self): from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData - row_data = PartialRowData(b"row-key") - val1 = b"hi-im-bytes" + row_data = PartialRowData(b'row-key') + val1 = b'hi-im-bytes' ts1_millis = 1221934570148 ts1 = ts1_millis * 1000 - val2 = b"bi-im-hytes" + val2 = b'bi-im-hytes' ts2_millis = 1331934880000 ts2 = ts2_millis * 1000 - row_data._cells[u"fam1"] = { - b"col1": [Cell(val1, ts1)], - b"col2": [Cell(val2, ts2)], + row_data._cells[u'fam1'] = { + b'col1': [Cell(val1, ts1)], + b'col2': [Cell(val2, ts2)], } result = self._call_fut(row_data, include_timestamp=True) expected_result = { - b"fam1:col1": (val1, ts1_millis), - b"fam1:col2": (val2, 
-            b"fam1:col2": (val2, ts2_millis),
+            b'fam1:col1': (val1, ts1_millis),
+            b'fam1:col2': (val2, ts2_millis),
         }
         self.assertEqual(result, expected_result)
 
 
 class Test__filter_chain_helper(unittest.TestCase):
+
     def _call_fut(self, *args, **kwargs):
         from google.cloud.happybase.table import _filter_chain_helper
-
         return _filter_chain_helper(*args, **kwargs)
 
     def test_no_filters(self):
@@ -1234,25 +1252,19 @@ def test_existing_filters(self):
         # only have one value set.
         self.assertEqual(result.num_cells, versions)
 
-    def _column_helper(
-        self,
-        num_filters,
-        versions=None,
-        timestamp=None,
-        column=None,
-        col_fam=None,
-        qual=None,
-    ):
-        from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter
+    def _column_helper(self, num_filters, versions=None, timestamp=None,
                       column=None, col_fam=None, qual=None):
+        from google.cloud.bigtable.row_filters import (
+            ColumnQualifierRegexFilter)
         from google.cloud.bigtable.row_filters import FamilyNameRegexFilter
         from google.cloud.bigtable.row_filters import RowFilterChain
 
         if col_fam is None:
-            col_fam = "cf1"
+            col_fam = 'cf1'
         if qual is None:
-            qual = "qual"
+            qual = 'qual'
         if column is None:
-            column = col_fam + ":" + qual
+            column = col_fam + ':' + qual
 
         result = self._call_fut(column, versions=versions, timestamp=timestamp)
         self.assertTrue(isinstance(result, RowFilterChain))
@@ -1264,8 +1276,8 @@ def _column_helper(
 
         # Relies on the fact that RowFilter instances can
         # only have one value set.
-        self.assertEqual(fam_filter.regex, col_fam.encode("utf-8"))
-        self.assertEqual(qual_filter.regex, qual.encode("utf-8"))
+        self.assertEqual(fam_filter.regex, col_fam.encode('utf-8'))
+        self.assertEqual(qual_filter.regex, qual.encode('utf-8'))
 
         return result
 
@@ -1273,14 +1285,12 @@ def test_column_only(self):
         self._column_helper(num_filters=2)
 
     def test_column_bytes(self):
-        self._column_helper(
-            num_filters=2, column=b"cfB:qualY", col_fam=u"cfB", qual=u"qualY"
-        )
+        self._column_helper(num_filters=2, column=b'cfB:qualY',
                            col_fam=u'cfB', qual=u'qualY')
 
     def test_column_unicode(self):
-        self._column_helper(
-            num_filters=2, column=u"cfU:qualN", col_fam=u"cfU", qual=u"qualN"
-        )
+        self._column_helper(num_filters=2, column=u'cfU:qualN',
                            col_fam=u'cfU', qual=u'qualN')
 
     def test_with_versions(self):
         from google.cloud.bigtable.row_filters import CellsColumnLimitFilter
@@ -1315,13 +1325,14 @@ def test_with_timestamp(self):
     def test_with_all_options(self):
         versions = 11
         timestamp = 1441928298571
-        self._column_helper(num_filters=4, versions=versions, timestamp=timestamp)
+        self._column_helper(num_filters=4, versions=versions,
                            timestamp=timestamp)
 
 
 class Test__columns_filter_helper(unittest.TestCase):
+
     def _call_fut(self, *args, **kwargs):
         from google.cloud.happybase.table import _columns_filter_helper
-
         return _columns_filter_helper(*args, **kwargs)
 
     def test_no_columns(self):
@@ -1332,22 +1343,23 @@ def test_no_columns(self):
     def test_single_column(self):
         from google.cloud.bigtable.row_filters import FamilyNameRegexFilter
 
-        col_fam = "cf1"
+        col_fam = 'cf1'
         columns = [col_fam]
         result = self._call_fut(columns)
         expected_result = FamilyNameRegexFilter(col_fam)
         self.assertEqual(result, expected_result)
 
     def test_column_and_column_families(self):
-        from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter
+        from google.cloud.bigtable.row_filters import (
+            ColumnQualifierRegexFilter)
         from google.cloud.bigtable.row_filters import FamilyNameRegexFilter
         from google.cloud.bigtable.row_filters import RowFilterChain
         from google.cloud.bigtable.row_filters import RowFilterUnion
 
col_fam1 = "cf1" - col_fam2 = "cf2" - col_qual2 = "qual2" - columns = [col_fam1, col_fam2 + ":" + col_qual2] + col_fam1 = 'cf1' + col_fam2 = 'cf2' + col_qual2 = 'qual2' + columns = [col_fam1, col_fam2 + ':' + col_qual2] result = self._call_fut(columns) self.assertTrue(isinstance(result, RowFilterUnion)) @@ -1356,20 +1368,20 @@ def test_column_and_column_families(self): filter2 = result.filters[1] self.assertTrue(isinstance(filter1, FamilyNameRegexFilter)) - self.assertEqual(filter1.regex, col_fam1.encode("utf-8")) + self.assertEqual(filter1.regex, col_fam1.encode('utf-8')) self.assertTrue(isinstance(filter2, RowFilterChain)) filter2a, filter2b = filter2.filters self.assertTrue(isinstance(filter2a, FamilyNameRegexFilter)) - self.assertEqual(filter2a.regex, col_fam2.encode("utf-8")) + self.assertEqual(filter2a.regex, col_fam2.encode('utf-8')) self.assertTrue(isinstance(filter2b, ColumnQualifierRegexFilter)) - self.assertEqual(filter2b.regex, col_qual2.encode("utf-8")) + self.assertEqual(filter2b.regex, col_qual2.encode('utf-8')) class Test___get_row_set_object(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _get_row_set_object - return _get_row_set_object(*args, **kwargs) def test_row_set_object(self): @@ -1383,15 +1395,15 @@ def test_row_set_object(self): class Test___get_row_set_from_rows(unittest.TestCase): + def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _get_row_set_from_rows - return _get_row_set_from_rows(*args, **kwargs) def test_row_set_object(self): from google.cloud.bigtable.row_set import RowSet - rows = ["row_key1", "row_key2"] + rows = ['row_key1', 'row_key2'] row_set = self._call_fut(rows) self.assertIsInstance(row_set, RowSet) @@ -1399,17 +1411,20 @@ def test_row_set_object(self): class _Connection(object): + def __init__(self, instance): self._instance = instance class _MockLowLevelColumnFamily(object): + def __init__(self, column_family_id, gc_rule=None): self.column_family_id = column_family_id self.gc_rule = gc_rule class _MockLowLevelTable(object): + def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs @@ -1453,7 +1468,8 @@ def __init__(self, row_key, commit_result=None): self.commit_result = commit_result def increment_cell_value(self, column_family_id, column, int_value): - count = self.counts.setdefault((column_family_id, column), self.COUNTER_DEFAULT) + count = self.counts.setdefault((column_family_id, column), + self.COUNTER_DEFAULT) self.counts[(column_family_id, column)] = count + int_value def commit(self): @@ -1461,6 +1477,7 @@ def commit(self): class _MockBatch(object): + def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs @@ -1482,6 +1499,7 @@ def delete(self, *args): class _MockPartialRowsData(object): + def __init__(self, rows=None, iterations=0): self.rows = rows or {} self.consume_all_calls = 0