From c3f171c911d17804d695997b3a9656d1f987a2db Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Wed, 30 Jan 2019 12:46:49 -0800 Subject: [PATCH] Blacken (#64) --- docs/conf.py | 179 ++++--- scripts/pycodestyle_on_repo.py | 9 +- scripts/run_pylint.py | 142 +++-- scripts/verify_included_modules.py | 73 ++- src/google/__init__.py | 4 +- src/google/cloud/__init__.py | 4 +- src/google/cloud/happybase/batch.py | 73 +-- src/google/cloud/happybase/connection.py | 119 +++-- src/google/cloud/happybase/pool.py | 18 +- src/google/cloud/happybase/table.py | 206 ++++---- system_tests/attempt_system_tests.py | 61 ++- system_tests/happybase.py | 412 +++++++-------- system_tests/retry.py | 66 ++- system_tests/run_system_test.py | 30 +- system_tests/system_test_utils.py | 16 +- unit_tests/test_batch.py | 131 ++--- unit_tests/test_connection.py | 213 ++++---- unit_tests/test_pool.py | 41 +- unit_tests/test_table.py | 646 +++++++++++------------ 19 files changed, 1237 insertions(+), 1206 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 3dec28c..eebce7b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -32,86 +32,86 @@ import sphinx_rtd_theme -ON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True' +ON_READ_THE_DOCS = os.environ.get("READTHEDOCS", None) == "True" # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath("..")) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.doctest", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. templates_path = [] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'google-cloud-happybase' -copyright = u'2014, Google' +project = u"google-cloud-happybase" +copyright = u"2014, Google" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -distro = get_distribution('google-cloud-happybase') -release = os.getenv('SPHINX_RELEASE', distro.version) +distro = get_distribution("google-cloud-happybase") +release = os.getenv("SPHINX_RELEASE", distro.version) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
-#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- @@ -120,83 +120,83 @@ # a list of builtin themes. if not ON_READ_THE_DOCS: - html_theme = 'sphinx_rtd_theme' + html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = '_static/images/favicon.ico' +html_favicon = "_static/images/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] -html_add_permalinks = '#' +html_add_permalinks = "#" # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. 
-#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'googlecloudhappybasedoc' +htmlhelp_basename = "googlecloudhappybasedoc" html_context = {} @@ -204,45 +204,47 @@ # -- Options for LaTeX output -------------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } metadata = distro.get_metadata(distro.PKG_INFO) -author = message_from_string(metadata).get('Author') +author = message_from_string(metadata).get("Author") # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'google-cloud-happybase.tex', - u'google-cloud-happybase Documentation', - author, 'manual'), + ( + "index", + "google-cloud-happybase.tex", + u"google-cloud-happybase Documentation", + author, + "manual", + ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output -------------------------------------------- @@ -250,13 +252,17 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - ('index', 'google-cloud-happybase', - u'google-cloud-happybase Documentation', - [author], 1) + ( + "index", + "google-cloud-happybase", + u"google-cloud-happybase Documentation", + [author], + 1, + ) ] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ @@ -265,32 +271,39 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'google-cloud-happybase', - u'google-cloud-happybase Documentation', - author, 'google-cloud-happybase', 'Python API for Google Cloud HappyBase.', - 'Miscellaneous'), + ( + "index", + "google-cloud-happybase", + u"google-cloud-happybase Documentation", + author, + "google-cloud-happybase", + "Python API for Google Cloud HappyBase.", + "Miscellaneous", + ) ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # This pulls class descriptions from the class docstring, # and parameter definitions from the __init__ docstring. -autoclass_content = 'both' +autoclass_content = "both" # Configuration for intersphinx: # Refer to the Python standard library and the oauth2client and # httplib2 libraries. intersphinx_mapping = { - 'httplib2': ('http://httplib2.readthedocs.io/en/latest/', None), - 'google-cloud-python': ( - 'https://google-cloud-python.readthedocs.io/en/latest/', None), - 'oauth2client': ('http://oauth2client.readthedocs.io/en/latest', None), - 'python': ('https://docs.python.org/2', None), + "httplib2": ("http://httplib2.readthedocs.io/en/latest/", None), + "google-cloud-python": ( + "https://google-cloud-python.readthedocs.io/en/latest/", + None, + ), + "oauth2client": ("http://oauth2client.readthedocs.io/en/latest", None), + "python": ("https://docs.python.org/2", None), } diff --git a/scripts/pycodestyle_on_repo.py b/scripts/pycodestyle_on_repo.py index 1328b32..614ce50 100644 --- a/scripts/pycodestyle_on_repo.py +++ b/scripts/pycodestyle_on_repo.py @@ -26,16 +26,15 @@ def main(): """Run pycodestyle on all Python files in the repository.""" - git_root = subprocess.check_output( - ['git', 'rev-parse', '--show-toplevel']).strip() + git_root = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip() os.chdir(git_root) - python_files = subprocess.check_output(['git', 'ls-files', '*py']) + python_files = subprocess.check_output(["git", "ls-files", "*py"]) python_files = python_files.strip().split() - pycodestyle_command = ['pycodestyle'] + python_files + pycodestyle_command = ["pycodestyle"] + python_files status_code = subprocess.call(pycodestyle_command) sys.exit(status_code) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index 8725548..da25084 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -30,58 +30,46 @@ import sys -IGNORED_DIRECTORIES = [ -] -IGNORED_FILES = [ - os.path.join('docs', 'conf.py'), -] +IGNORED_DIRECTORIES = [] +IGNORED_FILES = [os.path.join("docs", "conf.py")] IGNORED_POSTFIXES = [ - os.path.join('google', '__init__.py'), - os.path.join('google', 'cloud', '__init__.py'), - 'setup.py', + os.path.join("google", "__init__.py"), + 
os.path.join("google", "cloud", "__init__.py"), + "setup.py", ] SCRIPTS_DIR = os.path.abspath(os.path.dirname(__file__)) -PRODUCTION_RC = os.path.join(SCRIPTS_DIR, 'pylintrc_default') -TEST_RC = os.path.join(SCRIPTS_DIR, 'pylintrc_reduced') +PRODUCTION_RC = os.path.join(SCRIPTS_DIR, "pylintrc_default") +TEST_RC = os.path.join(SCRIPTS_DIR, "pylintrc_reduced") TEST_DISABLED_MESSAGES = [ - 'abstract-method', - 'arguments-differ', - 'assignment-from-no-return', - 'attribute-defined-outside-init', - 'exec-used', - 'import-error', - 'invalid-name', - 'missing-docstring', - 'no-init', - 'no-self-use', - 'superfluous-parens', - 'too-few-public-methods', - 'too-many-locals', - 'too-many-public-methods', - 'unbalanced-tuple-unpacking', + "abstract-method", + "arguments-differ", + "assignment-from-no-return", + "attribute-defined-outside-init", + "exec-used", + "import-error", + "invalid-name", + "missing-docstring", + "no-init", + "no-self-use", + "superfluous-parens", + "too-few-public-methods", + "too-many-locals", + "too-many-public-methods", + "unbalanced-tuple-unpacking", ] -TEST_RC_ADDITIONS = { - 'MESSAGES CONTROL': { - 'disable': ', '.join(TEST_DISABLED_MESSAGES), - }, -} -TEST_RC_REPLACEMENTS = { - 'FORMAT': { - 'max-module-lines': 1950, - }, -} +TEST_RC_ADDITIONS = {"MESSAGES CONTROL": {"disable": ", ".join(TEST_DISABLED_MESSAGES)}} +TEST_RC_REPLACEMENTS = {"FORMAT": {"max-module-lines": 1950}} def read_config(filename): """Reads pylintrc config onto native ConfigParser object.""" config = ConfigParser.ConfigParser() - with open(filename, 'r') as file_obj: + with open(filename, "r") as file_obj: config.readfp(file_obj) return config -def make_test_rc(base_rc_filename, additions_dict, - replacements_dict, target_filename): +def make_test_rc(base_rc_filename, additions_dict, replacements_dict, target_filename): """Combines a base rc and test additions into single file.""" main_cfg = read_config(base_rc_filename) @@ -90,25 +78,23 @@ def make_test_rc(base_rc_filename, additions_dict, test_cfg._sections = copy.deepcopy(main_cfg._sections) for section, opts in additions_dict.items(): - curr_section = test_cfg._sections.setdefault( - section, test_cfg._dict()) + curr_section = test_cfg._sections.setdefault(section, test_cfg._dict()) for opt, opt_val in opts.items(): curr_val = curr_section.get(opt) if curr_val is None: - raise KeyError('Expected to be adding to existing option.') - curr_val = curr_val.rstrip(',') - curr_section[opt] = '%s, %s' % (curr_val, opt_val) + raise KeyError("Expected to be adding to existing option.") + curr_val = curr_val.rstrip(",") + curr_section[opt] = "%s, %s" % (curr_val, opt_val) for section, opts in replacements_dict.items(): - curr_section = test_cfg._sections.setdefault( - section, test_cfg._dict()) + curr_section = test_cfg._sections.setdefault(section, test_cfg._dict()) for opt, opt_val in opts.items(): curr_val = curr_section.get(opt) if curr_val is None: - raise KeyError('Expected to be replacing existing option.') - curr_section[opt] = '%s' % (opt_val,) + raise KeyError("Expected to be replacing existing option.") + curr_section[opt] = "%s" % (opt_val,) - with open(target_filename, 'w') as file_obj: + with open(target_filename, "w") as file_obj: test_cfg.write(file_obj) @@ -120,8 +106,7 @@ def valid_filename(filename): for directory in IGNORED_DIRECTORIES: if filename.startswith(directory): return False - return (filename.endswith('.py') and - filename not in IGNORED_FILES) + return filename.endswith(".py") and filename not in IGNORED_FILES def 
is_production_filename(filename): @@ -130,7 +115,7 @@ def is_production_filename(filename): :rtype: bool :returns: Boolean indicating production status. """ - return 'test' not in filename and 'docs' not in filename + return "test" not in filename and "docs" not in filename def get_files_for_linting(allow_limited=True): @@ -159,30 +144,31 @@ def get_files_for_linting(allow_limited=True): linted. """ diff_base = None - if (os.getenv('TRAVIS_BRANCH') == 'master' and - os.getenv('TRAVIS_PULL_REQUEST') != 'false'): + if ( + os.getenv("TRAVIS_BRANCH") == "master" + and os.getenv("TRAVIS_PULL_REQUEST") != "false" + ): # In the case of a pull request into master, we want to # diff against HEAD in master. - diff_base = 'origin/master' - elif os.getenv('TRAVIS') is None: + diff_base = "origin/master" + elif os.getenv("TRAVIS") is None: # Only allow specified remote and branch in local dev. - remote = os.getenv('GOOGLE_CLOUD_REMOTE_FOR_LINT') - branch = os.getenv('GOOGLE_CLOUD_BRANCH_FOR_LINT') + remote = os.getenv("GOOGLE_CLOUD_REMOTE_FOR_LINT") + branch = os.getenv("GOOGLE_CLOUD_BRANCH_FOR_LINT") if remote is not None and branch is not None: - diff_base = '%s/%s' % (remote, branch) + diff_base = "%s/%s" % (remote, branch) if diff_base is not None and allow_limited: - result = subprocess.check_output(['git', 'diff', '--name-only', - diff_base]) - print('Using files changed relative to %s:' % (diff_base,)) - print('-' * 60) - print(result.rstrip('\n')) # Don't print trailing newlines. - print('-' * 60) + result = subprocess.check_output(["git", "diff", "--name-only", diff_base]) + print("Using files changed relative to %s:" % (diff_base,)) + print("-" * 60) + print(result.rstrip("\n")) # Don't print trailing newlines. + print("-" * 60) else: - print('Diff base not specified, listing all files in repository.') - result = subprocess.check_output(['git', 'ls-files']) + print("Diff base not specified, listing all files in repository.") + result = subprocess.check_output(["git", "ls-files"]) - return result.rstrip('\n').split('\n'), diff_base + return result.rstrip("\n").split("\n"), diff_base def get_python_files(all_files=None): @@ -222,11 +208,10 @@ def lint_fileset(filenames, rcfile, description): # could spit out deleted / renamed files. Another alternative could # be to use 'git diff --name-status' and filter out files with a # status of 'D'. - filenames = [filename for filename in filenames - if os.path.exists(filename)] + filenames = [filename for filename in filenames if os.path.exists(filename)] if filenames: - rc_flag = '--rcfile=%s' % (rcfile,) - pylint_shell_command = ['pylint', rc_flag] + rc_flag = "--rcfile=%s" % (rcfile,) + pylint_shell_command = ["pylint", rc_flag] errors = {} # filename -> status_code for filename in filenames: cmd = pylint_shell_command + [filename] @@ -235,22 +220,21 @@ def lint_fileset(filenames, rcfile, description): errors[filename] = status_code if errors: for filename, status_code in sorted(errors.items()): - print('%-30s: %d' % (filename, status_code), file=sys.stderr) + print("%-30s: %d" % (filename, status_code), file=sys.stderr) sys.exit(len(errors)) else: - print('Skipping %s, no files to lint.' % (description,)) + print("Skipping %s, no files to lint." % (description,)) def main(): """Script entry point. 
Lints both sets of files.""" - make_test_rc(PRODUCTION_RC, TEST_RC_ADDITIONS, - TEST_RC_REPLACEMENTS, TEST_RC) + make_test_rc(PRODUCTION_RC, TEST_RC_ADDITIONS, TEST_RC_REPLACEMENTS, TEST_RC) library_files, non_library_files, diff_base = get_python_files() if diff_base: - print('Checking only files which differ from base.') - lint_fileset(library_files, PRODUCTION_RC, 'library code') - lint_fileset(non_library_files, TEST_RC, 'test code') + print("Checking only files which differ from base.") + lint_fileset(library_files, PRODUCTION_RC, "library code") + lint_fileset(non_library_files, TEST_RC, "test code") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/scripts/verify_included_modules.py b/scripts/verify_included_modules.py index d1cd154..2a34e78 100644 --- a/scripts/verify_included_modules.py +++ b/scripts/verify_included_modules.py @@ -25,21 +25,16 @@ from sphinx.ext.intersphinx import fetch_inventory -BASE_DIR = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..')) -DOCS_DIR = os.path.join(BASE_DIR, 'docs') -IGNORED_PREFIXES = ('test_', '_') -IGNORED_MODULES = frozenset([ - 'google.__init__', - 'google.cloud.__init__', -]) -PACKAGES = ( - 'src', -) +BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +DOCS_DIR = os.path.join(BASE_DIR, "docs") +IGNORED_PREFIXES = ("test_", "_") +IGNORED_MODULES = frozenset(["google.__init__", "google.cloud.__init__"]) +PACKAGES = ("src",) class SphinxApp(object): """Mock app to interact with Sphinx helpers.""" + warn = warnings.warn srcdir = DOCS_DIR @@ -56,9 +51,9 @@ def is_valid_module(filename): :rtype: bool :returns: Flag indicating if the filename is valid. """ - if not filename.endswith('.py'): + if not filename.endswith(".py"): return False - if filename == '__init__.py': + if filename == "__init__.py": return True for prefix in IGNORED_PREFIXES: if filename.startswith(prefix): @@ -82,8 +77,7 @@ def get_public_modules(path, base_package=None): result = [] for subdir, _, files in os.walk(path): # Skip folders that start with _. - if any([part.startswith('_') - for part in subdir.split(os.path.sep)]): + if any([part.startswith("_") for part in subdir.split(os.path.sep)]): continue _, rel_dir = subdir.split(path) rel_dir = rel_dir.lstrip(os.path.sep) @@ -94,48 +88,49 @@ def get_public_modules(path, base_package=None): if base_package is not None: rel_path = os.path.join(base_package, rel_path) # Turn into a Python module rather than a file path. - result.append(rel_path.replace(os.path.sep, '.')) + result.append(rel_path.replace(os.path.sep, ".")) return result -def verify_modules(build_root='_build'): +def verify_modules(build_root="_build"): """Verify modules included. :type build_root: str :param build_root: The root of the directory where docs are built into. Defaults to ``_build``. 
""" - object_inventory_relpath = os.path.join(build_root, 'html', 'objects.inv') + object_inventory_relpath = os.path.join(build_root, "html", "objects.inv") - mock_uri = '' - inventory = fetch_inventory(SphinxApp, mock_uri, - object_inventory_relpath) - sphinx_mods = set(inventory['py:module'].keys()) + mock_uri = "" + inventory = fetch_inventory(SphinxApp, mock_uri, object_inventory_relpath) + sphinx_mods = set(inventory["py:module"].keys()) public_mods = set() for package in PACKAGES: - library_dir = os.path.join(BASE_DIR, package, 'google', 'cloud') - package_mods = get_public_modules(library_dir, - base_package='google.cloud') + library_dir = os.path.join(BASE_DIR, package, "google", "cloud") + package_mods = get_public_modules(library_dir, base_package="google.cloud") public_mods.update(package_mods) if not sphinx_mods <= public_mods: unexpected_mods = sphinx_mods - public_mods - message = ['Unexpected error. There were modules referenced by ' - 'Sphinx that are not among the public modules.'] - message.extend(['- %s' % (mod,) for mod in unexpected_mods]) - print('\n'.join(message), file=sys.stderr) + message = [ + "Unexpected error. There were modules referenced by " + "Sphinx that are not among the public modules." + ] + message.extend(["- %s" % (mod,) for mod in unexpected_mods]) + print("\n".join(message), file=sys.stderr) sys.exit(1) undocumented_mods = public_mods - sphinx_mods # Remove ignored modules. undocumented_mods -= IGNORED_MODULES if undocumented_mods: - message_parts = ['Found undocumented public modules:'] - message_parts.extend(['- ' + mod_name - for mod_name in sorted(undocumented_mods)]) - print('\n'.join(message_parts), file=sys.stderr) + message_parts = ["Found undocumented public modules:"] + message_parts.extend( + ["- " + mod_name for mod_name in sorted(undocumented_mods)] + ) + print("\n".join(message_parts), file=sys.stderr) sys.exit(1) @@ -145,11 +140,13 @@ def get_parser(): :rtype: :class:`argparse.ArgumentParser` :returns: The parser for this script. """ - description = ('Run check that all google-cloud ' - 'modules are included in docs.') + description = "Run check that all google-cloud " "modules are included in docs." parser = argparse.ArgumentParser(description=description) - parser.add_argument('--build-root', dest='build_root', - help='The root directory where docs are located.') + parser.add_argument( + "--build-root", + dest="build_root", + help="The root directory where docs are located.", + ) return parser @@ -160,5 +157,5 @@ def main(): verify_modules(build_root=args.build_root) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/src/google/__init__.py b/src/google/__init__.py index 1fbf189..102caa9 100644 --- a/src/google/__init__.py +++ b/src/google/__init__.py @@ -13,6 +13,6 @@ # limitations under the License. try: - __import__('pkg_resources').declare_namespace(__name__) + __import__("pkg_resources").declare_namespace(__name__) except ImportError: - __path__ = __import__('pkgutil').extend_path(__path__, __name__) + __path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/src/google/cloud/__init__.py b/src/google/cloud/__init__.py index 1fbf189..102caa9 100644 --- a/src/google/cloud/__init__.py +++ b/src/google/cloud/__init__.py @@ -13,6 +13,6 @@ # limitations under the License. 
try: - __import__('pkg_resources').declare_namespace(__name__) + __import__("pkg_resources").declare_namespace(__name__) except ImportError: - __path__ = __import__('pkgutil').extend_path(__path__, __name__) + __path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/src/google/cloud/happybase/batch.py b/src/google/cloud/happybase/batch.py index 2b9bf1f..e34a4ed 100644 --- a/src/google/cloud/happybase/batch.py +++ b/src/google/cloud/happybase/batch.py @@ -27,8 +27,9 @@ _WAL_SENTINEL = object() # Assumed granularity of timestamps in Cloud Bigtable. _ONE_MILLISECOND = datetime.timedelta(microseconds=1000) -_WAL_WARNING = ('The wal argument (Write-Ahead-Log) is not ' - 'supported by Cloud Bigtable.') +_WAL_WARNING = ( + "The wal argument (Write-Ahead-Log) is not " "supported by Cloud Bigtable." +) class Batch(object): @@ -72,17 +73,24 @@ class Batch(object): is not positive. """ - def __init__(self, table, timestamp=None, batch_size=None, - transaction=False, wal=_WAL_SENTINEL): + def __init__( + self, + table, + timestamp=None, + batch_size=None, + transaction=False, + wal=_WAL_SENTINEL, + ): if wal is not _WAL_SENTINEL: warnings.warn(_WAL_WARNING) if batch_size is not None: if transaction: - raise TypeError('When batch_size is set, a Batch cannot be ' - 'transactional') + raise TypeError( + "When batch_size is set, a Batch cannot be " "transactional" + ) if batch_size <= 0: - raise ValueError('batch_size must be positive') + raise ValueError("batch_size must be positive") self._table = table self._batch_size = batch_size @@ -159,14 +167,13 @@ def put(self, row, data, wal=_WAL_SENTINEL): row_object = self._get_row(row) # Make sure all the keys are valid before beginning # to add mutations. - column_pairs = _get_column_pairs(six.iterkeys(data), - require_qualifier=True) + column_pairs = _get_column_pairs(six.iterkeys(data), require_qualifier=True) for column_family_id, column_qualifier in column_pairs: - value = data[(column_family_id + ':' + - column_qualifier).encode('utf-8')] - row_object.set_cell(column_family_id, column_qualifier, - value, timestamp=self._timestamp) + value = data[(column_family_id + ":" + column_qualifier).encode("utf-8")] + row_object.set_cell( + column_family_id, column_qualifier, value, timestamp=self._timestamp + ) self._mutation_count += len(data) self._try_send() @@ -192,15 +199,18 @@ def _delete_columns(self, columns, row_object): for column_family_id, column_qualifier in column_pairs: if column_qualifier is None: if self._delete_range is not None: - raise ValueError('The Cloud Bigtable API does not support ' - 'adding a timestamp to ' - '"DeleteFromFamily" ') - row_object.delete_cells(column_family_id, - columns=row_object.ALL_COLUMNS) + raise ValueError( + "The Cloud Bigtable API does not support " + "adding a timestamp to " + '"DeleteFromFamily" ' + ) + row_object.delete_cells( + column_family_id, columns=row_object.ALL_COLUMNS + ) else: - row_object.delete_cell(column_family_id, - column_qualifier, - time_range=self._delete_range) + row_object.delete_cell( + column_family_id, column_qualifier, time_range=self._delete_range + ) def delete(self, row, columns=None, wal=_WAL_SENTINEL): """Delete data from a row in the table owned by this batch. @@ -234,9 +244,11 @@ def delete(self, row, columns=None, wal=_WAL_SENTINEL): if columns is None: # Delete entire row. 
if self._delete_range is not None: - raise ValueError('The Cloud Bigtable API does not support ' - 'adding a timestamp to "DeleteFromRow" ' - 'mutations') + raise ValueError( + "The Cloud Bigtable API does not support " + 'adding a timestamp to "DeleteFromRow" ' + "mutations" + ) row_object.delete() self._mutation_count += 1 else: @@ -306,21 +318,20 @@ def _get_column_pairs(columns, require_qualifier=False): column_pairs = [] for column in columns: if isinstance(column, six.binary_type): - column = column.decode('utf-8') + column = column.decode("utf-8") # Remove trailing colons (i.e. for standalone column family). - if column.endswith(u':'): + if column.endswith(u":"): column = column[:-1] - num_colons = column.count(u':') + num_colons = column.count(u":") if num_colons == 0: # column is a column family. if require_qualifier: - raise ValueError('column does not contain a qualifier', - column) + raise ValueError("column does not contain a qualifier", column) else: column_pairs.append([column, None]) elif num_colons == 1: - column_pairs.append(column.split(u':')) + column_pairs.append(column.split(u":")) else: - raise ValueError('Column contains the : separator more than once') + raise ValueError("Column contains the : separator more than once") return column_pairs diff --git a/src/google/cloud/happybase/connection.py b/src/google/cloud/happybase/connection.py index 4a0e77b..9fd5630 100644 --- a/src/google/cloud/happybase/connection.py +++ b/src/google/cloud/happybase/connection.py @@ -47,17 +47,19 @@ DEFAULT_COMPAT = None DEFAULT_PROTOCOL = None -_LEGACY_ARGS = frozenset(('host', 'port', 'compat', 'transport', 'protocol')) -_BASE_DISABLE = 'Cloud Bigtable has no concept of enabled / disabled tables.' -_DISABLE_DELETE_MSG = ('The disable argument should not be used in ' - 'delete_table(). ') + _BASE_DISABLE -_ENABLE_TMPL = 'Connection.enable_table(%r) was called, but ' + _BASE_DISABLE -_DISABLE_TMPL = 'Connection.disable_table(%r) was called, but ' + _BASE_DISABLE -_IS_ENABLED_TMPL = ('Connection.is_table_enabled(%r) was called, but ' + - _BASE_DISABLE) -_COMPACT_TMPL = ('Connection.compact_table(%r, major=%r) was called, but the ' - 'Cloud Bigtable API handles table compactions automatically ' - 'and does not expose an API for it.') +_LEGACY_ARGS = frozenset(("host", "port", "compat", "transport", "protocol")) +_BASE_DISABLE = "Cloud Bigtable has no concept of enabled / disabled tables." +_DISABLE_DELETE_MSG = ( + "The disable argument should not be used in " "delete_table(). " +) + _BASE_DISABLE +_ENABLE_TMPL = "Connection.enable_table(%r) was called, but " + _BASE_DISABLE +_DISABLE_TMPL = "Connection.disable_table(%r) was called, but " + _BASE_DISABLE +_IS_ENABLED_TMPL = "Connection.is_table_enabled(%r) was called, but " + _BASE_DISABLE +_COMPACT_TMPL = ( + "Connection.compact_table(%r, major=%r) was called, but the " + "Cloud Bigtable API handles table compactions automatically " + "and does not expose an API for it." +) def _get_instance(): @@ -77,20 +79,23 @@ def _get_instance(): :raises ValueError: if there is a failed location or any number of instances other than one. """ - client_kwargs = {'admin': True} + client_kwargs = {"admin": True} client = Client(**client_kwargs) instances, failed_locations = client.list_instances() if failed_locations: - raise ValueError('Determining instance via ListInstances encountered ' - 'failed locations.') + raise ValueError( + "Determining instance via ListInstances encountered " "failed locations." 
+ ) num_instances = len(instances) if num_instances == 0: - raise ValueError('This client doesn\'t have access to any instances.') + raise ValueError("This client doesn't have access to any instances.") if num_instances > 1: - raise ValueError('This client has access to more than one instance. ' - 'Please directly pass the instance you\'d ' - 'like to use.') + raise ValueError( + "This client has access to more than one instance. " + "Please directly pass the instance you'd " + "like to use." + ) return instances[0] @@ -132,18 +137,31 @@ class Connection(object): _instance = None - def __init__(self, autoconnect=True, table_prefix=None, - table_prefix_separator='_', instance=None, **kwargs): + def __init__( + self, + autoconnect=True, + table_prefix=None, + table_prefix_separator="_", + instance=None, + **kwargs + ): self._handle_legacy_args(kwargs) if table_prefix is not None: if not isinstance(table_prefix, six.string_types): - raise TypeError('table_prefix must be a string', 'received', - table_prefix, type(table_prefix)) + raise TypeError( + "table_prefix must be a string", + "received", + table_prefix, + type(table_prefix), + ) if not isinstance(table_prefix_separator, six.string_types): - raise TypeError('table_prefix_separator must be a string', - 'received', table_prefix_separator, - type(table_prefix_separator)) + raise TypeError( + "table_prefix_separator must be a string", + "received", + table_prefix_separator, + type(table_prefix_separator), + ) self.table_prefix = table_prefix self.table_prefix_separator = table_prefix_separator @@ -169,15 +187,17 @@ def _handle_legacy_args(arguments_dict): """ common_args = _LEGACY_ARGS.intersection(six.iterkeys(arguments_dict)) if common_args: - all_args = ', '.join(common_args) - message = ('The HappyBase legacy arguments %s were used. These ' - 'arguments are unused by google-cloud.' % (all_args,)) + all_args = ", ".join(common_args) + message = ( + "The HappyBase legacy arguments %s were used. These " + "arguments are unused by google-cloud." % (all_args,) + ) warnings.warn(message) for arg_name in common_args: arguments_dict.pop(arg_name) if arguments_dict: unexpected_names = arguments_dict.keys() - raise TypeError('Received unexpected arguments', unexpected_names) + raise TypeError("Received unexpected arguments", unexpected_names) def open(self): """Open the underlying transport to Cloud Bigtable. @@ -239,15 +259,17 @@ def tables(self): :returns: List of string table names. """ low_level_table_instances = self._instance.list_tables() - table_names = [table_instance.table_id - for table_instance in low_level_table_instances] + table_names = [ + table_instance.table_id for table_instance in low_level_table_instances + ] # Filter using prefix, and strip prefix from names if self.table_prefix is not None: - prefix = self._table_name('') + prefix = self._table_name("") offset = len(prefix) - table_names = [name[offset:] for name in table_names - if name.startswith(prefix)] + table_names = [ + name[offset:] for name in table_names if name.startswith(prefix) + ] return table_names @@ -290,18 +312,19 @@ def create_table(self, name, families): table exists. 
""" if not isinstance(families, dict): - raise TypeError('families arg must be a dictionary') + raise TypeError("families arg must be a dictionary") if not families: - raise ValueError('Cannot create table %r (no column ' - 'families specified)' % (name,)) + raise ValueError( + "Cannot create table %r (no column " "families specified)" % (name,) + ) # Parse all keys before making any API requests. gc_rule_dict = {} for column_family_name, option in families.items(): if isinstance(column_family_name, six.binary_type): - column_family_name = column_family_name.decode('utf-8') - if column_family_name.endswith(':'): + column_family_name = column_family_name.decode("utf-8") + if column_family_name.endswith(":"): column_family_name = column_family_name[:-1] gc_rule_dict[column_family_name] = _parse_family_option(option) @@ -422,17 +445,19 @@ def _parse_family_option(option): """ result = option if isinstance(result, dict): - if not set(result.keys()) <= set(['max_versions', 'time_to_live']): - all_keys = ', '.join(repr(key) for key in result.keys()) - warning_msg = ('Cloud Bigtable only supports max_versions and ' - 'time_to_live column family settings. ' - 'Received: %s' % (all_keys,)) + if not set(result.keys()) <= set(["max_versions", "time_to_live"]): + all_keys = ", ".join(repr(key) for key in result.keys()) + warning_msg = ( + "Cloud Bigtable only supports max_versions and " + "time_to_live column family settings. " + "Received: %s" % (all_keys,) + ) warnings.warn(warning_msg) - max_num_versions = result.get('max_versions') + max_num_versions = result.get("max_versions") max_age = None - if 'time_to_live' in result: - max_age = datetime.timedelta(seconds=result['time_to_live']) + if "time_to_live" in result: + max_age = datetime.timedelta(seconds=result["time_to_live"]) versions_rule = age_rule = None if max_num_versions is not None: diff --git a/src/google/cloud/happybase/pool.py b/src/google/cloud/happybase/pool.py index 552dc5e..c24bd97 100644 --- a/src/google/cloud/happybase/pool.py +++ b/src/google/cloud/happybase/pool.py @@ -62,21 +62,22 @@ class ConnectionPool(object): :class:`ValueError ` if ``size`` is not positive. """ + def __init__(self, size, **kwargs): if not isinstance(size, six.integer_types): - raise TypeError('Pool size arg must be an integer') + raise TypeError("Pool size arg must be an integer") if size < _MIN_POOL_SIZE: - raise ValueError('Pool size must be positive') + raise ValueError("Pool size must be positive") self._lock = threading.Lock() self._queue = six.moves.queue.LifoQueue(maxsize=size) self._thread_connections = threading.local() connection_kwargs = kwargs - connection_kwargs['autoconnect'] = False - if 'instance' not in connection_kwargs: - connection_kwargs['instance'] = _get_instance() + connection_kwargs["autoconnect"] = False + if "instance" not in connection_kwargs: + connection_kwargs["instance"] = _get_instance() for _ in six.moves.range(size): connection = Connection(**connection_kwargs) @@ -97,8 +98,9 @@ def _acquire_connection(self, timeout=None): try: return self._queue.get(block=True, timeout=timeout) except six.moves.queue.Empty: - raise NoConnectionsAvailable('No connection available from pool ' - 'within specified timeout') + raise NoConnectionsAvailable( + "No connection available from pool " "within specified timeout" + ) @contextlib.contextmanager def connection(self, timeout=None): @@ -125,7 +127,7 @@ def connection(self, timeout=None): retrieved from the pool before the ``timeout`` (only if a timeout is specified). 
""" - connection = getattr(self._thread_connections, 'current', None) + connection = getattr(self._thread_connections, "current", None) retrieved_new_cnxn = False if connection is None: diff --git a/src/google/cloud/happybase/table.py b/src/google/cloud/happybase/table.py index 5b78cbe..aa59b9d 100644 --- a/src/google/cloud/happybase/table.py +++ b/src/google/cloud/happybase/table.py @@ -41,8 +41,8 @@ from google.cloud.happybase.batch import Batch -_PACK_I64 = struct.Struct('>q').pack -_UNPACK_I64 = struct.Struct('>q').unpack +_PACK_I64 = struct.Struct(">q").pack +_UNPACK_I64 = struct.Struct(">q").unpack _SIMPLE_GC_RULES = (MaxAgeGCRule, MaxVersionsGCRule) @@ -65,10 +65,14 @@ def make_row(cell_map, include_timestamp): :raises: :class:`NotImplementedError ` always """ - raise NotImplementedError('The Cloud Bigtable API output is not the same ' - 'as the output from the Thrift server, so this ' - 'helper can not be implemented.', 'Called with', - cell_map, include_timestamp) + raise NotImplementedError( + "The Cloud Bigtable API output is not the same " + "as the output from the Thrift server, so this " + "helper can not be implemented.", + "Called with", + cell_map, + include_timestamp, + ) def make_ordered_row(sorted_columns, include_timestamp): @@ -89,10 +93,14 @@ def make_ordered_row(sorted_columns, include_timestamp): :raises: :class:`NotImplementedError ` always """ - raise NotImplementedError('The Cloud Bigtable API output is not the same ' - 'as the output from the Thrift server, so this ' - 'helper can not be implemented.', 'Called with', - sorted_columns, include_timestamp) + raise NotImplementedError( + "The Cloud Bigtable API output is not the same " + "as the output from the Thrift server, so this " + "helper can not be implemented.", + "Called with", + sorted_columns, + include_timestamp, + ) class Table(object): @@ -114,11 +122,10 @@ def __init__(self, name, connection): self.connection = connection self._low_level_table = None if self.connection is not None: - self._low_level_table = _LowLevelTable(self.name, - self.connection._instance) + self._low_level_table = _LowLevelTable(self.name, self.connection._instance) def __repr__(self): - return '' % (self.name,) + return "" % (self.name,) def families(self): """Retrieve the column families for this table. @@ -145,8 +152,10 @@ def regions(self): :raises: :class:`NotImplementedError ` always """ - raise NotImplementedError('The Cloud Bigtable API does not have a ' - 'concept of splitting a table into regions.') + raise NotImplementedError( + "The Cloud Bigtable API does not have a " + "concept of splitting a table into regions." + ) def row(self, row, columns=None, timestamp=None, include_timestamp=False): """Retrieve a single row of data. @@ -182,19 +191,17 @@ def row(self, row, columns=None, timestamp=None, include_timestamp=False): if columns is not None: filters.append(_columns_filter_helper(columns)) # versions == 1 since we only want the latest. 
- filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, filters=filters) - partial_row_data = self._low_level_table.read_row( - row, filter_=filter_) + partial_row_data = self._low_level_table.read_row(row, filter_=filter_) if partial_row_data is None: return {} - return _partial_row_to_dict(partial_row_data, - include_timestamp=include_timestamp) + return _partial_row_to_dict( + partial_row_data, include_timestamp=include_timestamp + ) - def rows(self, rows, columns=None, timestamp=None, - include_timestamp=False): + def rows(self, rows, columns=None, timestamp=None, include_timestamp=False): """Retrieve multiple rows of data. All optional arguments behave the same in this method as they do in @@ -233,11 +240,10 @@ def rows(self, rows, columns=None, timestamp=None, row_set = _get_row_set_from_rows(rows) # versions == 1 since we only want the latest. - filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, filters=filters) rows_generator = self._low_level_table.read_rows( - row_set=row_set, - filter_=filter_) + row_set=row_set, filter_=filter_ + ) # NOTE: We could use max_loops = 1000 or some similar value to ensure # that the stream isn't open too long. @@ -245,12 +251,14 @@ def rows(self, rows, columns=None, timestamp=None, for rowdata in rows_generator: curr_row_data = rowdata curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp) + curr_row_data, include_timestamp=include_timestamp + ) result.append((curr_row_data.row_key, curr_row_dict)) return result - def cells(self, row, column, versions=None, timestamp=None, - include_timestamp=False): + def cells( + self, row, column, versions=None, timestamp=None, include_timestamp=False + ): """Retrieve multiple versions of a single cell from the table. :type row: str @@ -277,8 +285,9 @@ def cells(self, row, column, versions=None, timestamp=None, :returns: List of values in the cell (with timestamps if ``include_timestamp`` is :data:`True`). """ - filter_ = _filter_chain_helper(column=column, versions=versions, - timestamp=timestamp) + filter_ = _filter_chain_helper( + column=column, versions=versions, timestamp=timestamp + ) partial_row_data = self._low_level_table.read_row(row, filter_=filter_) if partial_row_data is None: return [] @@ -286,18 +295,25 @@ def cells(self, row, column, versions=None, timestamp=None, cells = partial_row_data._cells # We know that `_filter_chain_helper` has already verified that # column will split as such. - column = column.decode('utf-8') - column_family_id, column_qualifier = column.split(':') + column = column.decode("utf-8") + column_family_id, column_qualifier = column.split(":") # NOTE: We expect the only key in `cells` is `column_family_id` # and the only key `cells[column_family_id]` is # `column_qualifier`. But we don't check that this is true. 
- curr_cells = cells[column_family_id][column_qualifier.encode('utf-8')] - return _cells_to_pairs( - curr_cells, include_timestamp=include_timestamp) - - def scan(self, row_start=None, row_stop=None, row_prefix=None, - columns=None, timestamp=None, - include_timestamp=False, limit=None, **kwargs): + curr_cells = cells[column_family_id][column_qualifier.encode("utf-8")] + return _cells_to_pairs(curr_cells, include_timestamp=include_timestamp) + + def scan( + self, + row_start=None, + row_stop=None, + row_prefix=None, + columns=None, + timestamp=None, + include_timestamp=False, + limit=None, + **kwargs + ): """Create a scanner for data in this table. This method returns a generator that can be used for looping over the @@ -382,18 +398,21 @@ def scan(self, row_start=None, row_stop=None, row_prefix=None, ``filter`` is used. """ row_start, row_stop, filter_chain = _scan_filter_helper( - row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs) + row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs + ) row_set = _get_row_set_object(row_start, row_stop) rows_generator = self._low_level_table.read_rows( - row_set=row_set, limit=limit, filter_=filter_chain) + row_set=row_set, limit=limit, filter_=filter_chain + ) for rowdata in rows_generator: curr_row_data = rowdata # NOTE: We expect len(rows_dict) == 0, but don't check it. curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp) + curr_row_data, include_timestamp=include_timestamp + ) yield (curr_row_data.row_key, curr_row_dict) def put(self, row, data, timestamp=None, wal=_WAL_SENTINEL): @@ -463,8 +482,9 @@ def delete(self, row, columns=None, timestamp=None, wal=_WAL_SENTINEL): with self.batch(timestamp=timestamp, wal=wal) as batch: batch.delete(row, columns) - def batch(self, timestamp=None, batch_size=None, transaction=False, - wal=_WAL_SENTINEL): + def batch( + self, timestamp=None, batch_size=None, transaction=False, wal=_WAL_SENTINEL + ): """Create a new batch operation for this table. This method returns a new @@ -497,8 +517,13 @@ def batch(self, timestamp=None, batch_size=None, transaction=False, :rtype: :class:`~google.cloud.bigtable.happybase.batch.Batch` :returns: A batch bound to this table. """ - return Batch(self, timestamp=timestamp, batch_size=batch_size, - transaction=transaction, wal=wal) + return Batch( + self, + timestamp=timestamp, + batch_size=batch_size, + transaction=transaction, + wal=wal, + ) def counter_get(self, row, column): """Retrieve the current value of a counter column. @@ -569,8 +594,8 @@ def counter_inc(self, row, column, value=1): """ row = self._low_level_table.row(row, append=True) if isinstance(column, six.binary_type): - column = column.decode('utf-8') - column_family_id, column_qualifier = column.split(':') + column = column.decode("utf-8") + column_family_id, column_qualifier = column.split(":") row.increment_cell_value(column_family_id, column_qualifier, value) # See AppendRow.commit() will return a dictionary: # { @@ -585,10 +610,11 @@ def counter_inc(self, row, column, value=1): modified_cells = row.commit() # Get the cells in the modified column, column_cells = modified_cells[column_family_id][ - column_qualifier.encode('utf-8')] + column_qualifier.encode("utf-8") + ] # Make sure there is exactly one cell in the column. 
if len(column_cells) != 1: - raise ValueError('Expected server to return one modified cell.') + raise ValueError("Expected server to return one modified cell.") column_cell = column_cells[0] # Get the bytes value from the column and convert it to an integer. bytes_value = column_cell[0] @@ -651,14 +677,15 @@ def _gc_rule_to_dict(gc_rule): if gc_rule is None: result = {} elif isinstance(gc_rule, MaxAgeGCRule): - result = {'time_to_live': gc_rule.max_age.total_seconds()} + result = {"time_to_live": gc_rule.max_age.total_seconds()} elif isinstance(gc_rule, MaxVersionsGCRule): - result = {'max_versions': gc_rule.max_num_versions} + result = {"max_versions": gc_rule.max_num_versions} elif isinstance(gc_rule, GCRuleIntersection): if len(gc_rule.rules) == 2: rule1, rule2 = gc_rule.rules - if (isinstance(rule1, _SIMPLE_GC_RULES) and - isinstance(rule2, _SIMPLE_GC_RULES)): + if isinstance(rule1, _SIMPLE_GC_RULES) and isinstance( + rule2, _SIMPLE_GC_RULES + ): rule1 = _gc_rule_to_dict(rule1) rule2 = _gc_rule_to_dict(rule2) key1, = rule1.keys() @@ -682,7 +709,7 @@ def _next_char(str_val, index): in ``str_val``. """ ord_val = six.indexbytes(str_val, index) - return _to_bytes(chr(ord_val + 1), encoding='latin-1') + return _to_bytes(chr(ord_val + 1), encoding="latin-1") def _string_successor(str_val): @@ -703,18 +730,18 @@ def _string_successor(str_val): :rtype: str :returns: The next string in lexical order after ``str_val``. """ - str_val = _to_bytes(str_val, encoding='latin-1') - if str_val == b'': + str_val = _to_bytes(str_val, encoding="latin-1") + if str_val == b"": return str_val index = len(str_val) - 1 while index >= 0: - if six.indexbytes(str_val, index) != 0xff: + if six.indexbytes(str_val, index) != 0xFF: break index -= 1 if index == -1: - return b'' + return b"" return str_val[:index] + _next_char(str_val, index) @@ -816,16 +843,14 @@ def _partial_row_to_dict(partial_row_data, include_timestamp=False): """ result = {} for column, cells in six.iteritems(partial_row_data.to_dict()): - cell_vals = _cells_to_pairs(cells, - include_timestamp=include_timestamp) + cell_vals = _cells_to_pairs(cells, include_timestamp=include_timestamp) # NOTE: We assume there is exactly 1 version since we used that in # our filter, but we don't check this. result[column] = cell_vals[0] return result -def _filter_chain_helper(column=None, versions=None, timestamp=None, - filters=None): +def _filter_chain_helper(column=None, versions=None, timestamp=None, filters=None): """Create filter chain to limit a results set. 
:type column: str @@ -854,8 +879,8 @@ def _filter_chain_helper(column=None, versions=None, timestamp=None, if column is not None: if isinstance(column, six.binary_type): - column = column.decode('utf-8') - column_family_id, column_qualifier = column.split(':') + column = column.decode("utf-8") + column_family_id, column_qualifier = column.split(":") fam_filter = FamilyNameRegexFilter(column_family_id) qual_filter = ColumnQualifierRegexFilter(column_qualifier) filters.extend([fam_filter, qual_filter]) @@ -867,44 +892,50 @@ def _filter_chain_helper(column=None, versions=None, timestamp=None, num_filters = len(filters) if num_filters == 0: - raise ValueError('Must have at least one filter.') + raise ValueError("Must have at least one filter.") elif num_filters == 1: return filters[0] else: return RowFilterChain(filters=filters) -def _scan_filter_helper(row_start, row_stop, row_prefix, columns, - timestamp, limit, kwargs): +def _scan_filter_helper( + row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs +): """Helper for :meth:`scan`: build up a filter chain.""" - filter_ = kwargs.pop('filter', None) + filter_ = kwargs.pop("filter", None) legacy_args = [] - for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): + for kw_name in ("batch_size", "scan_batching", "sorted_columns"): if kw_name in kwargs: legacy_args.append(kw_name) kwargs.pop(kw_name) if legacy_args: - legacy_args = ', '.join(legacy_args) - message = ('The HappyBase legacy arguments %s were used. These ' - 'arguments are unused by google-cloud.' % (legacy_args,)) + legacy_args = ", ".join(legacy_args) + message = ( + "The HappyBase legacy arguments %s were used. These " + "arguments are unused by google-cloud." % (legacy_args,) + ) warnings.warn(message) if kwargs: - raise TypeError('Received unexpected arguments', kwargs.keys()) + raise TypeError("Received unexpected arguments", kwargs.keys()) if limit is not None and limit < 1: - raise ValueError('limit must be positive') + raise ValueError("limit must be positive") if row_prefix is not None: if row_start is not None or row_stop is not None: - raise ValueError('row_prefix cannot be combined with ' - 'row_start or row_stop') + raise ValueError( + "row_prefix cannot be combined with " "row_start or row_stop" + ) row_start = row_prefix row_stop = _string_successor(row_prefix) filters = [] if isinstance(filter_, six.string_types): - raise TypeError('Specifying filters as a string is not supported ' - 'by Cloud Bigtable. Use a ' - 'google.cloud.bigtable.row.RowFilter instead.') + raise TypeError( + "Specifying filters as a string is not supported " + "by Cloud Bigtable. Use a " + "google.cloud.bigtable.row.RowFilter instead." + ) elif filter_ is not None: filters.append(filter_) @@ -912,8 +943,7 @@ def _scan_filter_helper(row_start, row_stop, row_prefix, columns, filters.append(_columns_filter_helper(columns)) # versions == 1 since we only want the latest. 
- filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, filters=filters) return row_start, row_stop, filter_ @@ -937,15 +967,14 @@ def _columns_filter_helper(columns): fam_filter = FamilyNameRegexFilter(column_family_id) if column_qualifier is not None: qual_filter = ColumnQualifierRegexFilter(column_qualifier) - combined_filter = RowFilterChain( - filters=[fam_filter, qual_filter]) + combined_filter = RowFilterChain(filters=[fam_filter, qual_filter]) filters.append(combined_filter) else: filters.append(fam_filter) num_filters = len(filters) if num_filters == 0: - raise ValueError('Must have at least one filter.') + raise ValueError("Must have at least one filter.") elif num_filters == 1: return filters[0] else: @@ -956,8 +985,7 @@ def _get_row_set_object(row_start, row_stop): """Return a RowSet object for the given row_start and row_stop """ row_set = RowSet() - row_set.add_row_range_from_keys(start_key=row_start, - end_key=row_stop) + row_set.add_row_range_from_keys(start_key=row_start, end_key=row_stop) return row_set diff --git a/system_tests/attempt_system_tests.py b/system_tests/attempt_system_tests.py index 4aa1b7f..e6376f0 100644 --- a/system_tests/attempt_system_tests.py +++ b/system_tests/attempt_system_tests.py @@ -58,15 +58,13 @@ from run_system_test import run_module_tests -MODULES = ( - 'happybase', -) +MODULES = ("happybase",) SCRIPTS_DIR = os.path.dirname(__file__) -ROOT_DIR = os.path.abspath(os.path.join(SCRIPTS_DIR, '..')) -ENCRYPTED_KEYFILE = os.path.join(ROOT_DIR, 'system_tests', 'key.json.enc') -ENCRYPTED_KEY_ENV = 'encrypted_INVALID_key' -ENCRYPTED_INIT_VECTOR_ENV = 'encrypted_INVALID_iv' +ROOT_DIR = os.path.abspath(os.path.join(SCRIPTS_DIR, "..")) +ENCRYPTED_KEYFILE = os.path.join(ROOT_DIR, "system_tests", "key.json.enc") +ENCRYPTED_KEY_ENV = "encrypted_INVALID_key" +ENCRYPTED_INIT_VECTOR_ENV = "encrypted_INVALID_iv" ALL_MODULES = object() # Sentinel for argparser @@ -80,10 +78,12 @@ def check_environment(): is running in Travis and the second indicates if the current build is a non-PR for a merge to master. """ - if os.getenv('TRAVIS') == 'true': + if os.getenv("TRAVIS") == "true": is_travis = True - non_pr = (os.getenv('TRAVIS_PULL_REQUEST') == 'false' and - os.getenv('TRAVIS_BRANCH') == 'master') + non_pr = ( + os.getenv("TRAVIS_PULL_REQUEST") == "false" + and os.getenv("TRAVIS_BRANCH") == "master" + ) else: is_travis = non_pr = False @@ -92,20 +92,27 @@ def check_environment(): def decrypt_keyfile(): """Decrypt a keyfile.""" - print('Running in Travis during merge, decrypting stored ' - 'key file.') + print("Running in Travis during merge, decrypting stored " "key file.") encrypted_key = os.getenv(ENCRYPTED_KEY_ENV) encrypted_iv = os.getenv(ENCRYPTED_INIT_VECTOR_ENV) out_file = os.getenv(CREDENTIALS) # Convert encrypted key file into decrypted file to be used. - subprocess.call([ - 'openssl', 'aes-256-cbc', - '-K', encrypted_key, - '-iv', encrypted_iv, - '-in', ENCRYPTED_KEYFILE, - '-out', out_file, '-d' - ]) + subprocess.call( + [ + "openssl", + "aes-256-cbc", + "-K", + encrypted_key, + "-iv", + encrypted_iv, + "-in", + ENCRYPTED_KEYFILE, + "-out", + out_file, + "-d", + ] + ) def prepare_to_run(): @@ -124,8 +131,7 @@ def prepare_to_run(): # On a Travis PR, exit the program. 
if not non_pr: - print('Running in Travis during non-merge to master, ' - 'doing nothing.') + print("Running in Travis during non-merge to master, " "doing nothing.") sys.exit(0) # On a Travis build for a merge commit to master, decrypt. @@ -134,12 +140,9 @@ def prepare_to_run(): def get_parser(): """Get an argument parser to determine a list of packages.""" - parser = argparse.ArgumentParser( - description='google-cloud tests runner.') - help_msg = ('List of packages to be tested. ' - 'If left blank, tests all packages.') - parser.add_argument('packages', nargs='*', - default=ALL_MODULES, help=help_msg) + parser = argparse.ArgumentParser(description="google-cloud tests runner.") + help_msg = "List of packages to be tested. " "If left blank, tests all packages." + parser.add_argument("packages", nargs="*", default=ALL_MODULES, help=help_msg) return parser @@ -159,7 +162,7 @@ def get_modules(): invalid.append(package) if invalid: - msg = 'No system test for packages: ' + ', '.join(invalid) + msg = "No system test for packages: " + ", ".join(invalid) print(msg, file=sys.stderr) sys.exit(1) @@ -181,5 +184,5 @@ def main(): sys.exit(failed_modules) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/system_tests/happybase.py b/system_tests/happybase.py index 7c9aad1..76e7d35 100644 --- a/system_tests/happybase.py +++ b/system_tests/happybase.py @@ -29,36 +29,38 @@ from system_test_utils import unique_resource_id -_PACK_I64 = struct.Struct('>q').pack +_PACK_I64 = struct.Struct(">q").pack _FIRST_ELT = operator.itemgetter(0) -LOCATION_ID = 'us-central1-c' +LOCATION_ID = "us-central1-c" # NOTE: Avoid using the same name as in bigtable.py -INSTANCE_ID = 'gcl-hb' + unique_resource_id('-') -CLUSTER_ID = 'gcl-hb-c1' + unique_resource_id('-') +INSTANCE_ID = "gcl-hb" + unique_resource_id("-") +CLUSTER_ID = "gcl-hb-c1" + unique_resource_id("-") SERVER_NODES = 3 -TABLE_NAME = 'table-name' -ALT_TABLE_NAME = 'other-table' +TABLE_NAME = "table-name" +ALT_TABLE_NAME = "other-table" TTL_FOR_TEST = 3 -COL_FAM1 = 'cf1' -COL_FAM2 = 'cf2' -COL_FAM3 = 'cf3' +COL_FAM1 = "cf1" +COL_FAM2 = "cf2" +COL_FAM3 = "cf3" FAMILIES = { - COL_FAM1: {'max_versions': 10}, - COL_FAM2: {'max_versions': 1, 'time_to_live': TTL_FOR_TEST}, + COL_FAM1: {"max_versions": 10}, + COL_FAM2: {"max_versions": 1, "time_to_live": TTL_FOR_TEST}, COL_FAM3: {}, # use defaults } -ROW_KEY1 = b'row-key1' -ROW_KEY2 = b'row-key2a' -ROW_KEY3 = b'row-key2b' -COL1 = (COL_FAM1 + ':qual1').encode('utf-8') -COL2 = (COL_FAM1 + ':qual2').encode('utf-8') -COL3 = (COL_FAM2 + ':qual1').encode('utf-8') -COL4 = (COL_FAM3 + ':qual3').encode('utf-8') - -LABEL_KEY = u'python-system-happybase' -label_stamp = datetime.datetime.utcnow() \ - .replace(microsecond=0, tzinfo=UTC,) \ - .strftime("%Y-%m-%dt%H-%M-%S") +ROW_KEY1 = b"row-key1" +ROW_KEY2 = b"row-key2a" +ROW_KEY3 = b"row-key2b" +COL1 = (COL_FAM1 + ":qual1").encode("utf-8") +COL2 = (COL_FAM1 + ":qual2").encode("utf-8") +COL3 = (COL_FAM2 + ":qual1").encode("utf-8") +COL4 = (COL_FAM3 + ":qual3").encode("utf-8") + +LABEL_KEY = u"python-system-happybase" +label_stamp = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") +) LABELS = {LABEL_KEY: str(label_stamp)} @@ -68,6 +70,7 @@ class Config(object): This is a mutable stand-in to allow test set-up to modify global state. 
""" + CONNECTION = None TABLE = None @@ -104,8 +107,9 @@ def _wait_until_complete(operation, max_attempts=5): def set_connection(): client = client_mod.Client(admin=True) instance = client.instance(INSTANCE_ID, labels=LABELS) - cluster = instance.cluster(CLUSTER_ID, location_id=LOCATION_ID, - serve_nodes=SERVER_NODES) + cluster = instance.cluster( + CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVER_NODES + ) operation = instance.create(clusters=[cluster]) operation.result(10) Config.CONNECTION = Connection(instance=instance) @@ -124,7 +128,6 @@ def tearDownModule(): class TestConnection(unittest.TestCase): - def test_create_and_delete_table(self): connection = Config.CONNECTION @@ -145,7 +148,6 @@ def test_create_table_failure(self): class BaseTableTest(unittest.TestCase): - def setUp(self): self.rows_to_delete = [] @@ -155,7 +157,6 @@ def tearDown(self): class TestTable_families(BaseTableTest): - def test_families(self): families = Config.TABLE.families() @@ -167,7 +168,6 @@ def test_families(self): class TestTable_row(BaseTableTest): - def test_row_when_empty(self): row1 = Config.TABLE.row(ROW_KEY1) row2 = Config.TABLE.row(ROW_KEY2) @@ -177,16 +177,11 @@ def test_row_when_empty(self): def test_row_with_columns(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' - value4 = b'value4' - row1_data = { - COL1: value1, - COL2: value2, - COL3: value3, - COL4: value4, - } + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" + value4 = b"value4" + row1_data = {COL1: value1, COL2: value2, COL3: value3, COL4: value4} # Need to clean-up row1 after. self.rows_to_delete.append(ROW_KEY1) @@ -198,31 +193,25 @@ def test_row_with_columns(self): # Pick out specific columns. row1_diff_fams = table.row(ROW_KEY1, columns=[COL1, COL4]) - self.assertEqual(row1_diff_fams, {COL1: value1, - COL4: value4}) + self.assertEqual(row1_diff_fams, {COL1: value1, COL4: value4}) row1_single_col = table.row(ROW_KEY1, columns=[COL3]) self.assertEqual(row1_single_col, {COL3: value3}) row1_col_fam = table.row(ROW_KEY1, columns=[COL_FAM1]) - self.assertEqual(row1_col_fam, {COL1: value1, - COL2: value2}) + self.assertEqual(row1_col_fam, {COL1: value1, COL2: value2}) row1_fam_qual_overlap1 = table.row(ROW_KEY1, columns=[COL1, COL_FAM1]) - self.assertEqual(row1_fam_qual_overlap1, {COL1: value1, - COL2: value2}) + self.assertEqual(row1_fam_qual_overlap1, {COL1: value1, COL2: value2}) row1_fam_qual_overlap2 = table.row(ROW_KEY1, columns=[COL_FAM1, COL1]) - self.assertEqual(row1_fam_qual_overlap2, - {COL1: value1, COL2: value2}) - row1_multiple_col_fams = table.row(ROW_KEY1, - columns=[COL_FAM1, COL_FAM2]) - self.assertEqual(row1_multiple_col_fams, - {COL1: value1, - COL2: value2, - COL3: value3}) + self.assertEqual(row1_fam_qual_overlap2, {COL1: value1, COL2: value2}) + row1_multiple_col_fams = table.row(ROW_KEY1, columns=[COL_FAM1, COL_FAM2]) + self.assertEqual( + row1_multiple_col_fams, {COL1: value1, COL2: value2, COL3: value3} + ) def test_row_with_timestamp(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" # Need to clean-up row1 after. 
self.rows_to_delete.append(ROW_KEY1) @@ -236,37 +225,25 @@ def test_row_with_timestamp(self): ts2 = row1[COL2][1] ts3 = row1[COL3][1] - expected_row = { - COL1: (value1, ts1), - COL2: (value2, ts2), - COL3: (value3, ts3), - } + expected_row = {COL1: (value1, ts1), COL2: (value2, ts2), COL3: (value3, ts3)} self.assertEqual(row1, expected_row) # Make sure the timestamps are (strictly) ascending. self.assertTrue(ts1 < ts2 < ts3) # Use timestamps to retrieve row. - first_two = table.row(ROW_KEY1, timestamp=ts2 + 1, - include_timestamp=True) - self.assertEqual(first_two, { - COL1: (value1, ts1), - COL2: (value2, ts2), - }) - first_one = table.row(ROW_KEY1, timestamp=ts2, - include_timestamp=True) - self.assertEqual(first_one, { - COL1: (value1, ts1), - }) + first_two = table.row(ROW_KEY1, timestamp=ts2 + 1, include_timestamp=True) + self.assertEqual(first_two, {COL1: (value1, ts1), COL2: (value2, ts2)}) + first_one = table.row(ROW_KEY1, timestamp=ts2, include_timestamp=True) + self.assertEqual(first_one, {COL1: (value1, ts1)}) class TestTable_rows(BaseTableTest): - def test_rows(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" row1_data = {COL1: value1, COL2: value2} row2_data = {COL1: value3} @@ -283,9 +260,9 @@ def test_rows(self): def test_rows_with_returned_timestamps(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" row1_data = {COL1: value1, COL2: value2} row2_data = {COL1: value3} @@ -296,8 +273,9 @@ def test_rows_with_returned_timestamps(self): batch.put(ROW_KEY1, row1_data) batch.put(ROW_KEY2, row2_data) - rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True), - key=_FIRST_ELT) + rows = sorted( + table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True), key=_FIRST_ELT + ) row1, row2 = rows self.assertEqual(row1[0], ROW_KEY1) self.assertEqual(row2[0], ROW_KEY2) @@ -308,8 +286,7 @@ def test_rows_with_returned_timestamps(self): ts = row1[COL1][1] # All will have the same timestamp since we used batch. - expected_row1_result = {COL1: (value1, ts), - COL2: (value2, ts)} + expected_row1_result = {COL1: (value1, ts), COL2: (value2, ts)} self.assertEqual(row1, expected_row1_result) # NOTE: This method was written before Cloud Bigtable had the concept # of batching, so each mutation is sent individually. (This @@ -324,9 +301,9 @@ def test_rows_with_returned_timestamps(self): def test_rows_with_columns(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" row1_data = {COL1: value1, COL2: value2} row2_data = {COL1: value3} @@ -337,8 +314,9 @@ def test_rows_with_columns(self): table.put(ROW_KEY2, row2_data) # Filter a single column present in both rows. - rows_col1 = sorted(table.rows([ROW_KEY1, ROW_KEY2], columns=[COL1]), - key=_FIRST_ELT) + rows_col1 = sorted( + table.rows([ROW_KEY1, ROW_KEY2], columns=[COL1]), key=_FIRST_ELT + ) row1, row2 = rows_col1 self.assertEqual(row1, (ROW_KEY1, {COL1: value1})) self.assertEqual(row2, (ROW_KEY2, {COL1: value3})) @@ -349,8 +327,8 @@ def test_rows_with_columns(self): # Filter a column family. 
rows_col_fam1 = sorted( - table.rows([ROW_KEY1, ROW_KEY2], columns=[COL_FAM1]), - key=_FIRST_ELT) + table.rows([ROW_KEY1, ROW_KEY2], columns=[COL_FAM1]), key=_FIRST_ELT + ) row1, row2 = rows_col_fam1 self.assertEqual(row1, (ROW_KEY1, row1_data)) self.assertEqual(row2, (ROW_KEY2, row2_data)) @@ -360,27 +338,27 @@ def test_rows_with_columns(self): self.assertEqual(rows_col_fam2, []) # Filter a column family that overlaps with a column. - rows_col_fam_overlap1 = sorted(table.rows([ROW_KEY1, ROW_KEY2], - columns=[COL1, COL_FAM1]), - key=_FIRST_ELT) + rows_col_fam_overlap1 = sorted( + table.rows([ROW_KEY1, ROW_KEY2], columns=[COL1, COL_FAM1]), key=_FIRST_ELT + ) row1, row2 = rows_col_fam_overlap1 self.assertEqual(row1, (ROW_KEY1, row1_data)) self.assertEqual(row2, (ROW_KEY2, row2_data)) # Filter a column family that overlaps with a column (opposite order). - rows_col_fam_overlap2 = sorted(table.rows([ROW_KEY1, ROW_KEY2], - columns=[COL_FAM1, COL1]), - key=_FIRST_ELT) + rows_col_fam_overlap2 = sorted( + table.rows([ROW_KEY1, ROW_KEY2], columns=[COL_FAM1, COL1]), key=_FIRST_ELT + ) row1, row2 = rows_col_fam_overlap2 self.assertEqual(row1, (ROW_KEY1, row1_data)) self.assertEqual(row2, (ROW_KEY2, row2_data)) def test_rows_with_timestamp(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' - value4 = b'value4' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" + value4 = b"value4" # Need to clean-up row1 and row2 after. self.rows_to_delete.append(ROW_KEY1) @@ -391,8 +369,9 @@ def test_rows_with_timestamp(self): table.put(ROW_KEY1, {COL4: value4}) # Just grab the timestamps - rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True), - key=_FIRST_ELT) + rows = sorted( + table.rows([ROW_KEY1, ROW_KEY2], include_timestamp=True), key=_FIRST_ELT + ) row1, row2 = rows self.assertEqual(row1[0], ROW_KEY1) self.assertEqual(row2[0], ROW_KEY2) @@ -407,39 +386,37 @@ def test_rows_with_timestamp(self): self.assertTrue(ts1 < ts2 < ts3 < ts4) # Rows before the third timestamp (assumes exclusive endpoint). - rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts3, - include_timestamp=True), - key=_FIRST_ELT) + rows = sorted( + table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts3, include_timestamp=True), + key=_FIRST_ELT, + ) row1, row2 = rows self.assertEqual(row1, (ROW_KEY1, {COL1: (value1, ts1)})) self.assertEqual(row2, (ROW_KEY2, {COL1: (value2, ts2)})) # All writes (bump the exclusive endpoint by 1 millisecond). - rows = sorted(table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts4 + 1, - include_timestamp=True), - key=_FIRST_ELT) + rows = sorted( + table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts4 + 1, include_timestamp=True), + key=_FIRST_ELT, + ) row1, row2 = rows - row1_all_data = { - COL1: (value1, ts1), - COL2: (value3, ts3), - COL4: (value4, ts4), - } + row1_all_data = {COL1: (value1, ts1), COL2: (value3, ts3), COL4: (value4, ts4)} self.assertEqual(row1, (ROW_KEY1, row1_all_data)) self.assertEqual(row2, (ROW_KEY2, {COL1: (value2, ts2)})) # First three writes, restricted to column 2. 
- rows = table.rows([ROW_KEY1, ROW_KEY2], timestamp=ts4, - columns=[COL2], include_timestamp=True) + rows = table.rows( + [ROW_KEY1, ROW_KEY2], timestamp=ts4, columns=[COL2], include_timestamp=True + ) self.assertEqual(rows, [(ROW_KEY1, {COL2: (value3, ts3)})]) class TestTable_cells(BaseTableTest): - def test_cells(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" # Need to clean-up row1 after. self.rows_to_delete.append(ROW_KEY1) @@ -458,35 +435,32 @@ def test_cells(self): ts3 = all_cells[0][1] ts2 = all_cells[1][1] ts1 = all_cells[2][1] - self.assertEqual(all_cells, - [(value3, ts3), (value2, ts2), (value1, ts1)]) + self.assertEqual(all_cells, [(value3, ts3), (value2, ts2), (value1, ts1)]) # Limit to the two latest cells. - latest_two = table.cells(ROW_KEY1, COL1, include_timestamp=True, - versions=2) + latest_two = table.cells(ROW_KEY1, COL1, include_timestamp=True, versions=2) self.assertEqual(latest_two, [(value3, ts3), (value2, ts2)]) # Limit to cells before the 2nd timestamp (inclusive). - first_two = table.cells(ROW_KEY1, COL1, include_timestamp=True, - timestamp=ts2 + 1) + first_two = table.cells( + ROW_KEY1, COL1, include_timestamp=True, timestamp=ts2 + 1 + ) self.assertEqual(first_two, [(value2, ts2), (value1, ts1)]) # Limit to cells before the 2nd timestamp (exclusive). - first_cell = table.cells(ROW_KEY1, COL1, include_timestamp=True, - timestamp=ts2) + first_cell = table.cells(ROW_KEY1, COL1, include_timestamp=True, timestamp=ts2) self.assertEqual(first_cell, [(value1, ts1)]) class TestTable_scan(BaseTableTest): - def test_scan_when_empty(self): scan_result = list(Config.TABLE.scan()) self.assertEqual(scan_result, []) def test_scan_single_row(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. 
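# Usage sketch (not part of the patch itself) of the cells() behavior
# TestTable_cells exercises above: results are ordered newest-first,
# `versions` caps how many cells come back, and `timestamp` is an
# exclusive upper bound, so passing ts + 1 includes the cell written at ts.
def latest_two_cells(table, row_key, col):
    # Hypothetical helper; returns [(newest, ts_newest), (older, ts_older)].
    return table.cells(row_key, col, versions=2, include_timestamp=True)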
@@ -497,8 +471,7 @@ def test_scan_single_row(self): self.assertEqual(scan_result, [(ROW_KEY1, row1_data)]) scan_result_cols = list(table.scan(columns=[COL1])) - self.assertEqual(scan_result_cols, [(ROW_KEY1, - {COL1: value1})]) + self.assertEqual(scan_result_cols, [(ROW_KEY1, {COL1: value1})]) scan_result_ts = list(table.scan(include_timestamp=True)) self.assertEqual(len(scan_result_ts), 1) @@ -506,18 +479,16 @@ def test_scan_single_row(self): self.assertEqual(only_row[0], ROW_KEY1) row_values = only_row[1] ts = row_values[COL1][1] - self.assertEqual(row_values, - {COL1: (value1, ts), - COL2: (value2, ts)}) + self.assertEqual(row_values, {COL1: (value1, ts), COL2: (value2, ts)}) def test_scan_filters(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' - value4 = b'value4' - value5 = b'value5' - value6 = b'value6' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" + value4 = b"value4" + value5 = b"value5" + value6 = b"value6" row1_data = {COL1: value1, COL2: value2} row2_data = {COL2: value3, COL3: value4} row3_data = {COL3: value5, COL4: value6} @@ -532,26 +503,22 @@ def test_scan_filters(self): # Basic scan (no filters) scan_result = list(table.scan()) - self.assertEqual(scan_result, [ - (ROW_KEY1, row1_data), - (ROW_KEY2, row2_data), - (ROW_KEY3, row3_data), - ]) + self.assertEqual( + scan_result, + [(ROW_KEY1, row1_data), (ROW_KEY2, row2_data), (ROW_KEY3, row3_data)], + ) # Limit the size of the scan scan_result = list(table.scan(limit=1)) - self.assertEqual(scan_result, [ - (ROW_KEY1, row1_data), - ]) + self.assertEqual(scan_result, [(ROW_KEY1, row1_data)]) # Scan with a row prefix. prefix = ROW_KEY2[:-1] self.assertEqual(prefix, ROW_KEY3[:-1]) scan_result_prefixed = list(table.scan(row_prefix=prefix)) - self.assertEqual(scan_result_prefixed, [ - (ROW_KEY2, row2_data), - (ROW_KEY3, row3_data), - ]) + self.assertEqual( + scan_result_prefixed, [(ROW_KEY2, row2_data), (ROW_KEY3, row3_data)] + ) # Make sure our keys are sorted in order row_keys = [ROW_KEY1, ROW_KEY2, ROW_KEY3] @@ -559,33 +526,31 @@ def test_scan_filters(self): # row_start alone (inclusive) scan_result_row_start = list(table.scan(row_start=ROW_KEY2)) - self.assertEqual(scan_result_row_start, [ - (ROW_KEY2, row2_data), - (ROW_KEY3, row3_data), - ]) + self.assertEqual( + scan_result_row_start, [(ROW_KEY2, row2_data), (ROW_KEY3, row3_data)] + ) # row_stop alone (exclusive) scan_result_row_stop = list(table.scan(row_stop=ROW_KEY2)) - self.assertEqual(scan_result_row_stop, [ - (ROW_KEY1, row1_data), - ]) + self.assertEqual(scan_result_row_stop, [(ROW_KEY1, row1_data)]) # Both row_start and row_stop scan_result_row_stop_and_start = list( - table.scan(row_start=ROW_KEY1, row_stop=ROW_KEY3)) - self.assertEqual(scan_result_row_stop_and_start, [ - (ROW_KEY1, row1_data), - (ROW_KEY2, row2_data), - ]) + table.scan(row_start=ROW_KEY1, row_stop=ROW_KEY3) + ) + self.assertEqual( + scan_result_row_stop_and_start, + [(ROW_KEY1, row1_data), (ROW_KEY2, row2_data)], + ) def test_scan_timestamp(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' - value4 = b'value4' - value5 = b'value5' - value6 = b'value6' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" + value4 = b"value4" + value5 = b"value5" + value6 = b"value6" # Need to clean-up row1/2/3 after. 
self.rows_to_delete.append(ROW_KEY1) @@ -620,43 +585,39 @@ def test_scan_timestamp(self): ts5 = row3[COL3][1] ts6 = row1[COL1][1] - self.assertEqual(row1, {COL1: (value1, ts6), - COL2: (value2, ts4)}) - self.assertEqual(row2, {COL2: (value3, ts3), - COL3: (value4, ts2)}) - self.assertEqual(row3, {COL3: (value5, ts5), - COL4: (value6, ts1)}) + self.assertEqual(row1, {COL1: (value1, ts6), COL2: (value2, ts4)}) + self.assertEqual(row2, {COL2: (value3, ts3), COL3: (value4, ts2)}) + self.assertEqual(row3, {COL3: (value5, ts5), COL4: (value6, ts1)}) # All cells before ts1 (exclusive) - scan_result_before_ts1 = list(table.scan(timestamp=ts1, - include_timestamp=True)) + scan_result_before_ts1 = list(table.scan(timestamp=ts1, include_timestamp=True)) self.assertEqual(scan_result_before_ts1, []) # All cells before ts2 (inclusive) - scan_result_before_ts2 = list(table.scan(timestamp=ts2 + 1, - include_timestamp=True)) - self.assertEqual(scan_result_before_ts2, [ - (ROW_KEY2, {COL3: (value4, ts2)}), - (ROW_KEY3, {COL4: (value6, ts1)}), - ]) + scan_result_before_ts2 = list( + table.scan(timestamp=ts2 + 1, include_timestamp=True) + ) + self.assertEqual( + scan_result_before_ts2, + [(ROW_KEY2, {COL3: (value4, ts2)}), (ROW_KEY3, {COL4: (value6, ts1)})], + ) # All cells before ts6 (exclusive) - scan_result_before_ts6 = list(table.scan(timestamp=ts6, - include_timestamp=True)) - self.assertEqual(scan_result_before_ts6, [ - (ROW_KEY1, {COL2: (value2, ts4)}), - (ROW_KEY2, {COL2: (value3, ts3), - COL3: (value4, ts2)}), - (ROW_KEY3, {COL3: (value5, ts5), - COL4: (value6, ts1)}), - ]) + scan_result_before_ts6 = list(table.scan(timestamp=ts6, include_timestamp=True)) + self.assertEqual( + scan_result_before_ts6, + [ + (ROW_KEY1, {COL2: (value2, ts4)}), + (ROW_KEY2, {COL2: (value3, ts3), COL3: (value4, ts2)}), + (ROW_KEY3, {COL3: (value5, ts5), COL4: (value6, ts1)}), + ], + ) class TestTable_put(BaseTableTest): - def test_put(self): - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. @@ -672,13 +633,15 @@ def test_put(self): timestamp2 = row1[COL2][1] self.assertEqual(timestamp1, timestamp2) - row1_data_with_timestamps = {COL1: (value1, timestamp1), - COL2: (value2, timestamp2)} + row1_data_with_timestamps = { + COL1: (value1, timestamp1), + COL2: (value2, timestamp2), + } self.assertEqual(row1, row1_data_with_timestamps) def test_put_with_timestamp(self): - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" row1_data = {COL1: value1, COL2: value2} ts = 1461367402 @@ -688,17 +651,15 @@ def test_put_with_timestamp(self): # Check again, but this time with timestamps. row1 = Config.TABLE.row(ROW_KEY1, include_timestamp=True) - row1_data_with_timestamps = {COL1: (value1, ts), - COL2: (value2, ts)} + row1_data_with_timestamps = {COL1: (value1, ts), COL2: (value2, ts)} self.assertEqual(row1, row1_data_with_timestamps) class TestTable_delete(BaseTableTest): - def test_delete(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. @@ -714,8 +675,8 @@ def test_delete(self): def test_delete_with_columns(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. 
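# Sketch (not part of the patch itself) of the round trip that
# test_put_with_timestamp verifies above: HappyBase timestamps are integer
# milliseconds, and an explicit timestamp is shared by every cell written
# in the same put().
def put_then_read(table, row_key, data, millis=1461367402):
    table.put(row_key, data, timestamp=millis)
    # Every returned cell carries the same timestamp: {col: (value, millis)}
    return table.row(row_key, include_timestamp=True)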
@@ -731,9 +692,9 @@ def test_delete_with_columns(self): def test_delete_with_column_family(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' - value3 = b'value3' + value1 = b"value1" + value2 = b"value2" + value3 = b"value3" row1_data = {COL1: value1, COL2: value2, COL4: value3} # Need to clean-up row1 after. @@ -749,8 +710,8 @@ def test_delete_with_column_family(self): def test_delete_with_columns_family_overlap(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" row1_data = {COL1: value1, COL2: value2} # Need to clean-up row1 after. @@ -776,8 +737,8 @@ def test_delete_with_columns_family_overlap(self): def test_delete_with_timestamp(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" # Need to clean-up row1 after. self.rows_to_delete.append(ROW_KEY1) @@ -806,13 +767,12 @@ def test_delete_with_timestamp(self): # creating Batch._delete_range. table.delete(ROW_KEY1, columns=[COL1, COL2], timestamp=ts1) row1_after_incl_delete = table.row(ROW_KEY1, include_timestamp=True) - self.assertEqual(row1_after_incl_delete, - {COL2: (value2, ts2)}) + self.assertEqual(row1_after_incl_delete, {COL2: (value2, ts2)}) def test_delete_with_columns_and_timestamp(self): table = Config.TABLE - value1 = b'value1' - value2 = b'value2' + value1 = b"value1" + value2 = b"value2" # Need to clean-up row1 after. self.rows_to_delete.append(ROW_KEY1) @@ -845,7 +805,6 @@ def test_delete_with_columns_and_timestamp(self): class TestTableCounterMethods(BaseTableTest): - def test_counter_get(self): table = Config.TABLE @@ -856,8 +815,7 @@ def test_counter_get(self): initial_counter = table.counter_get(ROW_KEY1, COL1) self.assertEqual(initial_counter, 0) - self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), - {COL1: _PACK_I64(0)}) + self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), {COL1: _PACK_I64(0)}) def test_counter_inc(self): table = Config.TABLE @@ -874,8 +832,9 @@ def test_counter_inc(self): self.assertEqual(updated_counter, inc_value) # Check that the value is set (does not seem to occur on HBase). - self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), - {COL1: _PACK_I64(inc_value)}) + self.assertEqual( + table.row(ROW_KEY1, columns=[COL1]), {COL1: _PACK_I64(inc_value)} + ) def test_counter_dec(self): table = Config.TABLE @@ -892,5 +851,6 @@ def test_counter_dec(self): self.assertEqual(updated_counter, -dec_value) # Check that the value is set (does not seem to occur on HBase). - self.assertEqual(table.row(ROW_KEY1, columns=[COL1]), - {COL1: _PACK_I64(-dec_value)}) + self.assertEqual( + table.row(ROW_KEY1, columns=[COL1]), {COL1: _PACK_I64(-dec_value)} + ) diff --git a/system_tests/retry.py b/system_tests/retry.py index d4694f6..674c94b 100644 --- a/system_tests/retry.py +++ b/system_tests/retry.py @@ -47,8 +47,8 @@ class RetryBase(object): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. """ - def __init__(self, max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): + + def __init__(self, max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, logger=None): self.max_tries = max_tries self.delay = delay self.backoff = backoff @@ -79,9 +79,16 @@ class RetryErrors(RetryBase): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. 
""" - def __init__(self, exception, error_predicate=_retry_all, - max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): + + def __init__( + self, + exception, + error_predicate=_retry_all, + max_tries=MAX_TRIES, + delay=DELAY, + backoff=BACKOFF, + logger=None, + ): super(RetryErrors, self).__init__(max_tries, delay, backoff, logger) self.exception = exception self.error_predicate = error_predicate @@ -98,9 +105,11 @@ def wrapped_function(*args, **kwargs): if not self.error_predicate(caught_exception): raise - delay = self.delay * self.backoff**tries - msg = ("%s, Trying again in %d seconds..." % - (caught_exception, delay)) + delay = self.delay * self.backoff ** tries + msg = "%s, Trying again in %d seconds..." % ( + caught_exception, + delay, + ) self.logger(msg) time.sleep(delay) @@ -130,9 +139,15 @@ class RetryResult(RetryBase): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. """ - def __init__(self, result_predicate, - max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): + + def __init__( + self, + result_predicate, + max_tries=MAX_TRIES, + delay=DELAY, + backoff=BACKOFF, + logger=None, + ): super(RetryResult, self).__init__(max_tries, delay, backoff, logger) self.result_predicate = result_predicate @@ -145,9 +160,11 @@ def wrapped_function(*args, **kwargs): if self.result_predicate(result): return result - delay = self.delay * self.backoff**tries + delay = self.delay * self.backoff ** tries msg = "%s. Trying again in %d seconds..." % ( - self.result_predicate.__name__, delay,) + self.result_predicate.__name__, + delay, + ) self.logger(msg) time.sleep(delay) @@ -177,15 +194,20 @@ class RetryInstanceState(RetryBase): :type logger: logging.Logger instance :param logger: Logger to use. If None, print. """ - def __init__(self, instance_predicate, - max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): - super(RetryInstanceState, self).__init__( - max_tries, delay, backoff, logger) + + def __init__( + self, + instance_predicate, + max_tries=MAX_TRIES, + delay=DELAY, + backoff=BACKOFF, + logger=None, + ): + super(RetryInstanceState, self).__init__(max_tries, delay, backoff, logger) self.instance_predicate = instance_predicate def __call__(self, to_wrap): - instance = to_wrap.__self__ # only instance methods allowed + instance = to_wrap.__self__ # only instance methods allowed @wraps(to_wrap) def wrapped_function(*args, **kwargs): @@ -195,9 +217,11 @@ def wrapped_function(*args, **kwargs): if self.instance_predicate(instance): return result - delay = self.delay * self.backoff**tries + delay = self.delay * self.backoff ** tries msg = "%s. Trying again in %d seconds..." 
% ( - self.instance_predicate.__name__, delay,) + self.instance_predicate.__name__, + delay, + ) self.logger(msg) time.sleep(delay) diff --git a/system_tests/run_system_test.py b/system_tests/run_system_test.py index 2ee4235..e232202 100644 --- a/system_tests/run_system_test.py +++ b/system_tests/run_system_test.py @@ -20,9 +20,7 @@ import system_test_utils -TEST_MODULES = { - 'happybase': happybase, -} +TEST_MODULES = {"happybase": happybase} class FailedSystemTestModule(Exception): @@ -31,14 +29,21 @@ class FailedSystemTestModule(Exception): def get_parser(): parser = argparse.ArgumentParser( - description='Google Cloud test runner against actual project.') - parser.add_argument('--package', dest='package', - choices=TEST_MODULES.keys(), - default='datastore', help='Package to be tested.') + description="Google Cloud test runner against actual project." + ) parser.add_argument( - '--ignore-requirements', - dest='ignore_requirements', action='store_true', - help='Ignore the credentials requirement for the test.') + "--package", + dest="package", + choices=TEST_MODULES.keys(), + default="datastore", + help="Package to be tested.", + ) + parser.add_argument( + "--ignore-requirements", + dest="ignore_requirements", + action="store_true", + help="Ignore the credentials requirement for the test.", + ) return parser @@ -63,11 +68,10 @@ def main(): parser = get_parser() args = parser.parse_args() try: - run_module_tests(args.package, - ignore_requirements=args.ignore_requirements) + run_module_tests(args.package, ignore_requirements=args.ignore_requirements) except FailedSystemTestModule: sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/system_tests/system_test_utils.py b/system_tests/system_test_utils.py index cf913d4..79c8d47 100644 --- a/system_tests/system_test_utils.py +++ b/system_tests/system_test_utils.py @@ -44,10 +44,9 @@ def create_scoped_required(): def check_environ(): err_msg = None if CREDENTIALS is None: - err_msg = '\nMissing variables: ' + TEST_CREDENTIALS + err_msg = "\nMissing variables: " + TEST_CREDENTIALS elif not os.path.isfile(CREDENTIALS): - err_msg = '\nThe %s path %r is not a file.' % (TEST_CREDENTIALS, - CREDENTIALS) + err_msg = "\nThe %s path %r is not a file." % (TEST_CREDENTIALS, CREDENTIALS) if err_msg is not None: msg = ENVIRON_ERROR_MSG + err_msg @@ -55,15 +54,14 @@ def check_environ(): sys.exit(1) -def unique_resource_id(delimiter='_'): +def unique_resource_id(delimiter="_"): """A unique identifier for a resource. Intended to help locate resources created in particular testing environments and at particular times. 
""" - build_id = os.getenv('TRAVIS_BUILD_ID', '') - if build_id == '': - return '%s%d' % (delimiter, 1000 * time.time()) + build_id = os.getenv("TRAVIS_BUILD_ID", "") + if build_id == "": + return "%s%d" % (delimiter, 1000 * time.time()) - return '%s%s%s%d' % (delimiter, build_id, - delimiter, time.time()) + return "%s%s%s%d" % (delimiter, build_id, delimiter, time.time()) diff --git a/unit_tests/test_batch.py b/unit_tests/test_batch.py index 9c3aabe..8744562 100644 --- a/unit_tests/test_batch.py +++ b/unit_tests/test_batch.py @@ -25,9 +25,9 @@ def send(self): class TestBatch(unittest.TestCase): - def _get_target_class(self): from google.cloud.happybase.batch import Batch + return Batch def _make_one(self, *args, **kwargs): @@ -54,12 +54,13 @@ def test_constructor_explicit(self): transaction = False # Must be False when batch_size is non-null batch = self._make_one( - table, timestamp=timestamp, - batch_size=batch_size, transaction=transaction) + table, timestamp=timestamp, batch_size=batch_size, transaction=transaction + ) self.assertEqual(batch._table, table) self.assertEqual(batch._batch_size, batch_size) - self.assertEqual(batch._timestamp, - _datetime_from_microseconds(1000 * timestamp)) + self.assertEqual( + batch._timestamp, _datetime_from_microseconds(1000 * timestamp) + ) next_timestamp = _datetime_from_microseconds(1000 * (timestamp + 1)) time_range = TimestampRange(end=next_timestamp) @@ -94,8 +95,7 @@ def test_constructor_with_batch_size_and_transactional(self): batch_size = 1 transaction = True with self.assertRaises(TypeError): - self._make_one( - table, batch_size=batch_size, transaction=transaction) + self._make_one(table, batch_size=batch_size, transaction=transaction) def test_send(self): low_level_table = _MockLowLevelTable() @@ -103,8 +103,8 @@ def test_send(self): batch = self._make_one(table) batch._row_map = row_map = _MockRowMap() - row_map['row-key1'] = row1 = _MockRow() - row_map['row-key2'] = row2 = _MockRow() + row_map["row-key1"] = row1 = _MockRow() + row_map["row-key2"] = row2 = _MockRow() batch._mutation_count = 1337 self.assertEqual(row_map.clear_count, 0) @@ -174,7 +174,7 @@ def test__get_row_exists(self): table = object() batch = self._make_one(table) - row_key = 'row-key' + row_key = "row-key" row_obj = object() batch._row_map[row_key] = row_obj result = batch._get_row(row_key) @@ -193,7 +193,7 @@ def test__get_row_create_new(self): low_level_table.mock_row = mock_row = object() # Actually get the row (which creates a row via a low-level table). 
- row_key = 'row-key' + row_key = "row-key" result = batch._get_row(row_key) self.assertEqual(result, mock_row) @@ -208,17 +208,19 @@ def _put_helper(self, use_wal_none=False): table = object() batch = self._make_one(table) batch._timestamp = timestamp = object() - row_key = 'row-key' + row_key = "row-key" batch._row_map[row_key] = row = _MockRow() - col1_fam = 'cf1' - col1_qual = 'qual1' - value1 = 'value1' - col2_fam = 'cf2' - col2_qual = 'qual2' - value2 = 'value2' - data = {(col1_fam + ':' + col1_qual).encode('utf-8'): value1, - (col2_fam + ':' + col2_qual).encode('utf-8'): value2} + col1_fam = "cf1" + col1_qual = "qual1" + value1 = "value1" + col2_fam = "cf2" + col2_qual = "qual2" + value2 = "value2" + data = { + (col1_fam + ":" + col1_qual).encode("utf-8"): value1, + (col2_fam + ":" + col2_qual).encode("utf-8"): value2, + } self.assertEqual(batch._mutation_count, 0) self.assertEqual(row.set_cell_calls, []) @@ -235,13 +237,12 @@ def _put_helper(self, use_wal_none=False): ordered_calls = sorted(row.set_cell_calls, key=first_elt) cell1_args = (col1_fam, col1_qual, value1) - cell1_kwargs = {'timestamp': timestamp} + cell1_kwargs = {"timestamp": timestamp} cell2_args = (col2_fam, col2_qual, value2) - cell2_kwargs = {'timestamp': timestamp} - self.assertEqual(ordered_calls, [ - (cell1_args, cell1_kwargs), - (cell2_args, cell2_kwargs), - ]) + cell2_kwargs = {"timestamp": timestamp} + self.assertEqual( + ordered_calls, [(cell1_args, cell1_kwargs), (cell2_args, cell2_kwargs)] + ) def test_put_bad_wal(self): import warnings @@ -269,7 +270,7 @@ def _try_send(self): table = object() batch = CallTrySend(table) - row_key = 'row-key' + row_key = "row-key" batch._row_map[row_key] = _MockRow() self.assertEqual(batch._mutation_count, 0) @@ -284,23 +285,25 @@ def _delete_columns_test_helper(self, time_range=None): batch = self._make_one(table) batch._delete_range = time_range - col1_fam = 'cf1' - col2_fam = 'cf2' - col2_qual = 'col-name' - columns = [col1_fam + ':', col2_fam + ':' + col2_qual] + col1_fam = "cf1" + col2_fam = "cf2" + col2_qual = "col-name" + columns = [col1_fam + ":", col2_fam + ":" + col2_qual] row_object = _MockRow() batch._delete_columns(columns, row_object) self.assertEqual(row_object.commits, 0) cell_deleted_args = (col2_fam, col2_qual) - cell_deleted_kwargs = {'time_range': time_range} - self.assertEqual(row_object.delete_cell_calls, - [(cell_deleted_args, cell_deleted_kwargs)]) + cell_deleted_kwargs = {"time_range": time_range} + self.assertEqual( + row_object.delete_cell_calls, [(cell_deleted_args, cell_deleted_kwargs)] + ) fam_deleted_args = (col1_fam,) - fam_deleted_kwargs = {'columns': row_object.ALL_COLUMNS} - self.assertEqual(row_object.delete_cells_calls, - [(fam_deleted_args, fam_deleted_kwargs)]) + fam_deleted_kwargs = {"columns": row_object.ALL_COLUMNS} + self.assertEqual( + row_object.delete_cells_calls, [(fam_deleted_args, fam_deleted_kwargs)] + ) def test__delete_columns(self): self._delete_columns_test_helper() @@ -314,7 +317,7 @@ def _delete_entire_row_helper(self, use_wal_none=False): table = object() batch = self._make_one(table) - row_key = 'row-key' + row_key = "row-key" batch._row_map[row_key] = row = _MockRow() self.assertEqual(row.deletes, 0) @@ -346,7 +349,7 @@ def test_delete_entire_row_with_ts(self): batch = self._make_one(table) batch._delete_range = object() - row_key = 'row-key' + row_key = "row-key" batch._row_map[row_key] = row = _MockRow() self.assertEqual(row.deletes, 0) @@ -369,7 +372,7 @@ def _try_send(self): table = object() batch = 
CallTrySend(table) - row_key = 'row-key' + row_key = "row-key" batch._row_map[row_key] = _MockRow() self.assertEqual(batch._mutation_count, 0) @@ -383,26 +386,28 @@ def test_delete_some_columns(self): table = object() batch = self._make_one(table) - row_key = 'row-key' + row_key = "row-key" batch._row_map[row_key] = row = _MockRow() self.assertEqual(batch._mutation_count, 0) - col1_fam = 'cf1' - col2_fam = 'cf2' - col2_qual = 'col-name' - columns = [col1_fam + ':', col2_fam + ':' + col2_qual] + col1_fam = "cf1" + col2_fam = "cf2" + col2_qual = "col-name" + columns = [col1_fam + ":", col2_fam + ":" + col2_qual] batch.delete(row_key, columns=columns) self.assertEqual(batch._mutation_count, 2) cell_deleted_args = (col2_fam, col2_qual) - cell_deleted_kwargs = {'time_range': None} - self.assertEqual(row.delete_cell_calls, - [(cell_deleted_args, cell_deleted_kwargs)]) + cell_deleted_kwargs = {"time_range": None} + self.assertEqual( + row.delete_cell_calls, [(cell_deleted_args, cell_deleted_kwargs)] + ) fam_deleted_args = (col1_fam,) - fam_deleted_kwargs = {'columns': row.ALL_COLUMNS} - self.assertEqual(row.delete_cells_calls, - [(fam_deleted_args, fam_deleted_kwargs)]) + fam_deleted_kwargs = {"columns": row.ALL_COLUMNS} + self.assertEqual( + row.delete_cells_calls, [(fam_deleted_args, fam_deleted_kwargs)] + ) def test_context_manager(self): klass = self._get_target_class() @@ -431,7 +436,7 @@ class BatchWithSend(_SendMixin, klass): with self.assertRaises(ValueError): with batch: - raise ValueError('Something bad happened') + raise ValueError("Something bad happened") self.assertTrue(batch._send_called) @@ -447,7 +452,7 @@ class BatchWithSend(_SendMixin, klass): with self.assertRaises(ValueError): with batch: - raise ValueError('Something bad happened') + raise ValueError("Something bad happened") self.assertFalse(batch._send_called) @@ -457,25 +462,25 @@ class BatchWithSend(_SendMixin, klass): class Test__get_column_pairs(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.batch import _get_column_pairs + return _get_column_pairs(*args, **kwargs) def test_it(self): - columns = [b'cf1', u'cf2:', 'cf3::', 'cf3:name1', 'cf3:name2'] + columns = [b"cf1", u"cf2:", "cf3::", "cf3:name1", "cf3:name2"] result = self._call_fut(columns) expected_result = [ - ['cf1', None], - ['cf2', None], - ['cf3', ''], - ['cf3', 'name1'], - ['cf3', 'name2'], + ["cf1", None], + ["cf2", None], + ["cf3", ""], + ["cf3", "name1"], + ["cf3", "name2"], ] self.assertEqual(result, expected_result) def test_bad_column(self): - columns = ['a:b:c'] + columns = ["a:b:c"] with self.assertRaises(ValueError): self._call_fut(columns) @@ -490,7 +495,7 @@ def test_bad_columns_var(self): self._call_fut(columns) def test_column_family_with_require_qualifier(self): - columns = ['a:'] + columns = ["a:"] with self.assertRaises(ValueError): self._call_fut(columns, require_qualifier=True) @@ -529,13 +534,11 @@ def delete_cells(self, *args, **kwargs): class _MockTable(object): - def __init__(self, low_level_table): self._low_level_table = low_level_table class _MockLowLevelTable(object): - def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs diff --git a/unit_tests/test_connection.py b/unit_tests/test_connection.py index 714babd..9967428 100644 --- a/unit_tests/test_connection.py +++ b/unit_tests/test_connection.py @@ -19,19 +19,21 @@ class Test__get_instance(unittest.TestCase): - def _call_fut(self): from google.cloud.happybase.connection import _get_instance + return _get_instance() def 
_helper(self, instances=(), failed_locations=()): from functools import partial client_with_instances = partial( - _Client, instances=instances, failed_locations=failed_locations) + _Client, instances=instances, failed_locations=failed_locations + ) - with mock.patch('google.cloud.happybase.connection.Client', - client_with_instances): + with mock.patch( + "google.cloud.happybase.connection.Client", client_with_instances + ): result = self._call_fut() # If we've reached this point, then _call_fut didn't fail, so we know @@ -40,7 +42,7 @@ def _helper(self, instances=(), failed_locations=()): self.assertEqual(result, instance) client = instance.client self.assertEqual(client.args, ()) - expected_kwargs = {'admin': True} + expected_kwargs = {"admin": True} self.assertEqual(client.kwargs, expected_kwargs) def test_default(self): @@ -58,16 +60,15 @@ def test_with_too_many_instances(self): def test_with_failed_locations(self): instance = _Instance() - failed_location = 'us-central1-c' + failed_location = "us-central1-c" with self.assertRaises(ValueError): - self._helper(instances=[instance], - failed_locations=[failed_location]) + self._helper(instances=[instance], failed_locations=[failed_location]) class TestConnection(unittest.TestCase): - def _get_target_class(self): from google.cloud.happybase.connection import Connection + return Connection def _make_one(self, *args, **kwargs): @@ -79,13 +80,13 @@ def test_constructor_defaults(self): self.assertEqual(connection._instance, instance) self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, '_') + self.assertEqual(connection.table_prefix_separator, "_") def test_constructor_no_autoconnect(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, '_') + self.assertEqual(connection.table_prefix_separator, "_") def test_constructor_missing_instance(self): instance = _Instance() @@ -93,35 +94,35 @@ def test_constructor_missing_instance(self): def mock_get_instance(): return instance - with mock.patch('google.cloud.happybase.connection._get_instance', - mock_get_instance): - connection = self._make_one( - autoconnect=False, instance=None) + with mock.patch( + "google.cloud.happybase.connection._get_instance", mock_get_instance + ): + connection = self._make_one(autoconnect=False, instance=None) self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, '_') + self.assertEqual(connection.table_prefix_separator, "_") self.assertEqual(connection._instance, instance) def test_constructor_explicit(self): autoconnect = False - table_prefix = 'table-prefix' - table_prefix_separator = 'sep' + table_prefix = "table-prefix" + table_prefix_separator = "sep" instance = _Instance() connection = self._make_one( autoconnect=autoconnect, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance) + instance=instance, + ) self.assertTrue(connection._instance is instance) self.assertEqual(connection.table_prefix, table_prefix) - self.assertEqual(connection.table_prefix_separator, - table_prefix_separator) + self.assertEqual(connection.table_prefix_separator, table_prefix_separator) def test_constructor_with_unknown_argument(self): instance = _Instance() with self.assertRaises(TypeError): - self._make_one(instance=instance, unknown='foo') + self._make_one(instance=instance, 
unknown="foo") def test_constructor_with_legacy_args(self): import warnings @@ -129,16 +130,20 @@ def test_constructor_with_legacy_args(self): instance = _Instance() with warnings.catch_warnings(record=True) as warned: self._make_one( - instance=instance, host=object(), - port=object(), compat=object(), - transport=object(), protocol=object()) + instance=instance, + host=object(), + port=object(), + compat=object(), + transport=object(), + protocol=object(), + ) self.assertEqual(len(warned), 1) - self.assertIn('host', str(warned[0])) - self.assertIn('port', str(warned[0])) - self.assertIn('compat', str(warned[0])) - self.assertIn('transport', str(warned[0])) - self.assertIn('protocol', str(warned[0])) + self.assertIn("host", str(warned[0])) + self.assertIn("port", str(warned[0])) + self.assertIn("compat", str(warned[0])) + self.assertIn("transport", str(warned[0])) + self.assertIn("protocol", str(warned[0])) def test_constructor_non_string_prefix(self): table_prefix = object() @@ -151,30 +156,30 @@ def test_constructor_non_string_prefix_separator(self): with self.assertRaises(TypeError): self._make_one( - autoconnect=False, - table_prefix_separator=table_prefix_separator) + autoconnect=False, table_prefix_separator=table_prefix_separator + ) def test__table_name_with_prefix_set(self): - table_prefix = 'table-prefix' - table_prefix_separator = '<>' + table_prefix = "table-prefix" + table_prefix_separator = "<>" instance = _Instance() connection = self._make_one( autoconnect=False, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance) + instance=instance, + ) - name = 'some-name' + name = "some-name" prefixed = connection._table_name(name) - self.assertEqual(prefixed, - table_prefix + table_prefix_separator + name) + self.assertEqual(prefixed, table_prefix + table_prefix_separator + name) def test__table_name_with_no_prefix_set(self): instance = _Instance() connection = self._make_one(autoconnect=False, instance=instance) - name = 'some-name' + name = "some-name" prefixed = connection._table_name(name) self.assertEqual(prefixed, name) @@ -184,7 +189,7 @@ def test_table_factory(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = 'table-name' + name = "table-name" table = connection.table(name) self.assertTrue(isinstance(table, Table)) @@ -195,14 +200,16 @@ def _table_factory_prefix_helper(self, use_prefix=True): from google.cloud.happybase.table import Table instance = _Instance() # Avoid implicit environ check. 
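# Sketch (not part of the patch itself) of the naming rule asserted by
# test__table_name_with_prefix_set above: with a prefix configured,
# Connection._table_name() is plain string concatenation.
def prefixed_name(table_prefix, separator, name):
    return table_prefix + separator + name

assert prefixed_name("table-prefix", "<>", "some-name") == "table-prefix<>some-name"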
- table_prefix = 'table-prefix' - table_prefix_separator = '<>' + table_prefix = "table-prefix" + table_prefix_separator = "<>" connection = self._make_one( - autoconnect=False, table_prefix=table_prefix, + autoconnect=False, + table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance) + instance=instance, + ) - name = 'table-name' + name = "table-name" table = connection.table(name, use_prefix=use_prefix) self.assertTrue(isinstance(table, Table)) @@ -222,12 +229,11 @@ def test_table_factory_with_ignored_prefix(self): def test_tables(self): from google.cloud.bigtable.table import Table - table_name1 = 'table-name1' - table_name2 = 'table-name2' - instance = _Instance(list_tables_result=[ - Table(table_name1, None), - Table(table_name2, None), - ]) + table_name1 = "table-name1" + table_name2 = "table-name2" + instance = _Instance( + list_tables_result=[Table(table_name1, None), Table(table_name2, None)] + ) connection = self._make_one(autoconnect=False, instance=instance) result = connection.tables() self.assertEqual(result, [table_name1, table_name2]) @@ -235,20 +241,21 @@ def test_tables(self): def test_tables_with_prefix(self): from google.cloud.bigtable.table import Table - table_prefix = 'prefix' - table_prefix_separator = '<>' - unprefixed_table_name1 = 'table-name1' - - table_name1 = (table_prefix + table_prefix_separator + - unprefixed_table_name1) - table_name2 = 'table-name2' - instance = _Instance(list_tables_result=[ - Table(table_name1, None), - Table(table_name2, None), - ]) + table_prefix = "prefix" + table_prefix_separator = "<>" + unprefixed_table_name1 = "table-name1" + + table_name1 = table_prefix + table_prefix_separator + unprefixed_table_name1 + table_name2 = "table-name2" + instance = _Instance( + list_tables_result=[Table(table_name1, None), Table(table_name2, None)] + ) connection = self._make_one( - autoconnect=False, instance=instance, table_prefix=table_prefix, - table_prefix_separator=table_prefix_separator) + autoconnect=False, + instance=instance, + table_prefix=table_prefix, + table_prefix_separator=table_prefix_separator, + ) result = connection.tables() self.assertEqual(result, [unprefixed_table_name1]) @@ -262,18 +269,18 @@ def mock_parse_family_option(option): called_options.append(option) return mock_gc_rule - name = 'table-name' - col_fam1 = 'cf1' + name = "table-name" + col_fam1 = "cf1" col_fam_option1 = object() - col_fam2 = u'cf2' + col_fam2 = u"cf2" col_fam_option2 = object() - col_fam3 = b'cf3' + col_fam3 = b"cf3" col_fam_option3 = object() families = { col_fam1: col_fam_option1, # A trailing colon is also allowed. - col_fam2 + ':': col_fam_option2, - col_fam3 + b':': col_fam_option3, + col_fam2 + ":": col_fam_option2, + col_fam3 + b":": col_fam_option3, } tables_created = [] @@ -284,7 +291,7 @@ def make_table(*args, **kwargs): return result patch = mock.patch.multiple( - 'google.cloud.happybase.connection', + "google.cloud.happybase.connection", _LowLevelTable=make_table, _parse_family_option=mock_parse_family_option, ) @@ -300,17 +307,18 @@ def make_table(*args, **kwargs): # Check if our mock was called twice, but we don't know the order. 
self.assertEqual( set(called_options), - set([col_fam_option1, col_fam_option2, col_fam_option3])) + set([col_fam_option1, col_fam_option2, col_fam_option3]), + ) col_fam_dict = table_instance.col_fam_dict - expected_cf_list = ['cf1', 'cf2', 'cf3'] + expected_cf_list = ["cf1", "cf2", "cf3"] self.assertEqual(sorted(col_fam_dict), expected_cf_list) def test_create_table_bad_type(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = 'table-name' + name = "table-name" families = None with self.assertRaises(TypeError): connection.create_table(name, families) @@ -319,7 +327,7 @@ def test_create_table_bad_value(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = 'table-name' + name = "table-name" families = {} with self.assertRaises(ValueError): connection.create_table(name, families) @@ -331,15 +339,14 @@ def _create_table_error_helper(self, err_val, err_type): tables_created = [] def make_table(*args, **kwargs): - kwargs['create_error'] = err_val + kwargs["create_error"] = err_val result = _MockLowLevelTable(*args, **kwargs) tables_created.append(result) return result - name = 'table-name' - families = {'foo': {}} - with mock.patch('google.cloud.happybase.connection._LowLevelTable', - make_table): + name = "table-name" + families = {"foo": {}} + with mock.patch("google.cloud.happybase.connection._LowLevelTable", make_table): with self.assertRaises(err_type): connection.create_table(name, families) @@ -351,15 +358,16 @@ def test_create_table_already_exists(self): from grpc.framework.interfaces.face import face from google.cloud.happybase.connection import AlreadyExists - err_val = face.NetworkError(None, None, - interfaces.StatusCode.ALREADY_EXISTS, None) + err_val = face.NetworkError( + None, None, interfaces.StatusCode.ALREADY_EXISTS, None + ) self._create_table_error_helper(err_val, AlreadyExists) def test_create_table_connection_error(self): from grpc.beta import interfaces from grpc.framework.interfaces.face import face - err_val = face.NetworkError(None, None, - interfaces.StatusCode.INTERNAL, None) + + err_val = face.NetworkError(None, None, interfaces.StatusCode.INTERNAL, None) self._create_table_error_helper(err_val, face.NetworkError) def test_create_table_other_error(self): @@ -376,9 +384,8 @@ def make_table(*args, **kwargs): tables_created.append(result) return result - name = 'table-name' - with mock.patch('google.cloud.happybase.connection._LowLevelTable', - make_table): + name = "table-name" + with mock.patch("google.cloud.happybase.connection._LowLevelTable", make_table): connection.delete_table(name, disable=disable) # Just one table would have been created. @@ -407,7 +414,7 @@ def test_enable_table(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = 'table-name' + name = "table-name" with warnings.catch_warnings(record=True) as warned: connection.enable_table(name) @@ -422,7 +429,7 @@ def test_disable_table(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = 'table-name' + name = "table-name" with warnings.catch_warnings(record=True) as warned: connection.disable_table(name) @@ -437,7 +444,7 @@ def test_is_table_enabled(self): instance = _Instance() # Avoid implicit environ check. 
connection = self._make_one(autoconnect=False, instance=instance) - name = 'table-name' + name = "table-name" with warnings.catch_warnings(record=True) as warned: result = connection.is_table_enabled(name) @@ -453,7 +460,7 @@ def test_compact_table(self): instance = _Instance() # Avoid implicit environ check. connection = self._make_one(autoconnect=False, instance=instance) - name = 'table-name' + name = "table-name" with warnings.catch_warnings(record=True) as warned: connection.compact_table(name) @@ -463,9 +470,9 @@ def test_compact_table(self): class Test__parse_family_option(unittest.TestCase): - def _call_fut(self, option): from google.cloud.happybase.connection import _parse_family_option + return _parse_family_option(option) def test_dictionary_no_keys(self): @@ -481,19 +488,19 @@ def test_null(self): def test_dictionary_bad_key(self): import warnings - option = {'badkey': None} + option = {"badkey": None} with warnings.catch_warnings(record=True) as warned: result = self._call_fut(option) self.assertEqual(result, None) self.assertEqual(len(warned), 1) - self.assertIn('badkey', str(warned[0])) + self.assertIn("badkey", str(warned[0])) def test_dictionary_versions_key(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule versions = 42 - option = {'max_versions': versions} + option = {"max_versions": versions} result = self._call_fut(option) gc_rule = MaxVersionsGCRule(versions) @@ -505,7 +512,7 @@ def test_dictionary_ttl_key(self): time_to_live = 24 * 60 * 60 max_age = datetime.timedelta(days=1) - option = {'time_to_live': time_to_live} + option = {"time_to_live": time_to_live} result = self._call_fut(option) gc_rule = MaxAgeGCRule(max_age) @@ -519,10 +526,7 @@ def test_dictionary_both_keys(self): versions = 42 time_to_live = 24 * 60 * 60 - option = { - 'max_versions': versions, - 'time_to_live': time_to_live, - } + option = {"max_versions": versions, "time_to_live": time_to_live} result = self._call_fut(option) max_age = datetime.timedelta(days=1) @@ -541,12 +545,11 @@ def test_non_dictionary(self): class _Client(object): - def __init__(self, *args, **kwargs): - self.instances = kwargs.pop('instances', []) + self.instances = kwargs.pop("instances", []) for instance in self.instances: instance.client = self - self.failed_locations = kwargs.pop('failed_locations', []) + self.failed_locations = kwargs.pop("failed_locations", []) self.args = args self.kwargs = kwargs @@ -555,7 +558,6 @@ def list_instances(self): class _Instance(object): - def __init__(self, list_tables_result=()): # Included to support Connection.__del__ self._client = _Client() @@ -566,11 +568,10 @@ def list_tables(self): class _MockLowLevelTable(object): - def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs - self.create_error = kwargs.get('create_error') + self.create_error = kwargs.get("create_error") self.delete_calls = 0 self.create_calls = 0 self.col_fam_dict = {} diff --git a/unit_tests/test_pool.py b/unit_tests/test_pool.py index a1ecc66..1182413 100644 --- a/unit_tests/test_pool.py +++ b/unit_tests/test_pool.py @@ -19,9 +19,9 @@ class TestConnectionPool(unittest.TestCase): - def _get_target_class(self): from google.cloud.happybase.pool import ConnectionPool + return ConnectionPool def _make_one(self, *args, **kwargs): @@ -49,8 +49,8 @@ def test_constructor_defaults(self): self.assertTrue(connection._instance is instance) def test_constructor_passes_kwargs(self): - table_prefix = 'foo' - table_prefix_separator = '<>' + table_prefix = "foo" + table_prefix_separator = 
"<>" instance = _Instance() # Avoid implicit environ check. size = 1 @@ -58,12 +58,12 @@ def test_constructor_passes_kwargs(self): size, table_prefix=table_prefix, table_prefix_separator=table_prefix_separator, - instance=instance) + instance=instance, + ) for connection in pool._queue.queue: self.assertEqual(connection.table_prefix, table_prefix) - self.assertEqual(connection.table_prefix_separator, - table_prefix_separator) + self.assertEqual(connection.table_prefix_separator, table_prefix_separator) def test_constructor_ignores_autoconnect(self): from google.cloud.happybase.connection import Connection @@ -85,8 +85,7 @@ def open(self): # Then make sure autoconnect=True is ignored in a pool. size = 1 - with mock.patch('google.cloud.happybase.pool.Connection', - ConnectionWithOpen): + with mock.patch("google.cloud.happybase.pool.Connection", ConnectionWithOpen): pool = self._make_one(size, autoconnect=True, instance=instance) for connection in pool._queue.queue: @@ -103,8 +102,7 @@ def test_constructor_infers_instance(self): mock_get_instance = mock.create_autospec(_get_instance) mock_get_instance.return_value = instance - with mock.patch('google.cloud.happybase.pool._get_instance', - mock_get_instance): + with mock.patch("google.cloud.happybase.pool._get_instance", mock_get_instance): pool = self._make_one(size) for connection in pool._queue.queue: @@ -131,7 +129,7 @@ def _make_one_with_mock_queue(self, queue_return): # or instances to be created in the constructor. size = -1 instance = object() - with mock.patch('google.cloud.happybase.pool._MIN_POOL_SIZE', size): + with mock.patch("google.cloud.happybase.pool._MIN_POOL_SIZE", size): pool = self._make_one(size, instance=instance) pool._queue = _Queue(queue_return) @@ -165,26 +163,27 @@ def test_connection_is_context_manager(self): pool = self._make_one_with_mock_queue(queue_return) cnxn_context = pool.connection() if six.PY3: # pragma: NO COVER Python 3 - self.assertTrue(isinstance(cnxn_context, - contextlib._GeneratorContextManager)) + self.assertTrue( + isinstance(cnxn_context, contextlib._GeneratorContextManager) + ) else: - self.assertTrue(isinstance(cnxn_context, - contextlib.GeneratorContextManager)) + self.assertTrue( + isinstance(cnxn_context, contextlib.GeneratorContextManager) + ) def test_connection_no_current_cnxn(self): queue_return = _Connection() pool = self._make_one_with_mock_queue(queue_return) timeout = 55 - self.assertFalse(hasattr(pool._thread_connections, 'current')) + self.assertFalse(hasattr(pool._thread_connections, "current")) with pool.connection(timeout=timeout) as connection: self.assertEqual(pool._thread_connections.current, queue_return) self.assertTrue(connection is queue_return) - self.assertFalse(hasattr(pool._thread_connections, 'current')) + self.assertFalse(hasattr(pool._thread_connections, "current")) self.assertEqual(pool._queue._get_calls, [(True, timeout)]) - self.assertEqual(pool._queue._put_calls, - [(queue_return, None, None)]) + self.assertEqual(pool._queue._put_calls, [(queue_return, None, None)]) def test_connection_with_current_cnxn(self): current_cnxn = _Connection() @@ -206,20 +205,17 @@ class _Client(object): class _Connection(object): - def open(self): pass class _Instance(object): - def __init__(self): # Included to support Connection.__del__ self._client = _Client() class _Queue(object): - def __init__(self, result=None): self.result = result self._get_calls = [] @@ -229,6 +225,7 @@ def get(self, block=None, timeout=None): self._get_calls.append((block, timeout)) if self.result is 
None: import six + raise six.moves.queue.Empty else: return self.result diff --git a/unit_tests/test_table.py b/unit_tests/test_table.py index ea4ff9e..6783e08 100644 --- a/unit_tests/test_table.py +++ b/unit_tests/test_table.py @@ -18,9 +18,9 @@ class Test_make_row(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import make_row + return make_row(*args, **kwargs) def test_it(self): @@ -29,9 +29,9 @@ def test_it(self): class Test_make_ordered_row(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import make_ordered_row + return make_ordered_row(*args, **kwargs) def test_it(self): @@ -40,16 +40,16 @@ def test_it(self): class TestTable(unittest.TestCase): - def _get_target_class(self): from google.cloud.happybase.table import Table + return Table def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): - name = 'table-name' + name = "table-name" instance = object() connection = _Connection(instance) tables_constructed = [] @@ -59,8 +59,9 @@ def make_low_level_table(*args, **kwargs): tables_constructed.append(result) return result - with mock.patch('google.cloud.happybase.table._LowLevelTable', - make_low_level_table): + with mock.patch( + "google.cloud.happybase.table._LowLevelTable", make_low_level_table + ): table = self._make_one(name, connection) self.assertEqual(table.name, name) self.assertEqual(table.connection, connection) @@ -71,7 +72,7 @@ def make_low_level_table(*args, **kwargs): self.assertEqual(table_instance.kwargs, {}) def test_constructor_null_connection(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) self.assertEqual(table.name, name) @@ -79,13 +80,13 @@ def test_constructor_null_connection(self): self.assertEqual(table._low_level_table, None) def test_families(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() # Mock the column families to be returned. 
- col_fam_name = 'fam' + col_fam_name = "fam" gc_rule = object() col_fam = _MockLowLevelColumnFamily(col_fam_name, gc_rule=gc_rule) col_fams = {col_fam_name: col_fam} @@ -98,8 +99,9 @@ def mock_gc_rule_to_dict(gc_rule): to_dict_calls.append(gc_rule) return to_dict_result - with mock.patch('google.cloud.happybase.table._gc_rule_to_dict', - mock_gc_rule_to_dict): + with mock.patch( + "google.cloud.happybase.table._gc_rule_to_dict", mock_gc_rule_to_dict + ): result = table.families() self.assertEqual(result, {col_fam_name: to_dict_result}) @@ -107,12 +109,12 @@ def mock_gc_rule_to_dict(gc_rule): self.assertEqual(to_dict_calls, [gc_rule]) def test___repr__(self): - name = 'table-name' + name = "table-name" table = self._make_one(name, None) - self.assertEqual(repr(table), '<table name=table-name>') + self.assertEqual(repr(table), "<table name=table-name>") def test_regions(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) @@ -120,7 +122,7 @@ def test_regions(self): table.regions() def test_row_empty_row(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -134,30 +136,28 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - row_key = 'row-key' + row_key = "row-key" timestamp = object() - with mock.patch('google.cloud.happybase.table._filter_chain_helper', - mock_filter_chain_helper): + with mock.patch( + "google.cloud.happybase.table._filter_chain_helper", + mock_filter_chain_helper, + ): result = table.row(row_key, timestamp=timestamp) # read_row_result == None --> No results. self.assertEqual(result, {}) read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) + read_row_kwargs = {"filter_": fake_filter} + self.assertEqual( + table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] + ) - expected_kwargs = { - 'filters': [], - 'versions': 1, - 'timestamp': timestamp, - } + expected_kwargs = {"filters": [], "versions": 1, "timestamp": timestamp} self.assertEqual(mock_filters, [expected_kwargs]) def test_row_with_columns(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -178,10 +178,10 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - row_key = 'row-key' + row_key = "row-key" columns = object() patch = mock.patch.multiple( - 'google.cloud.happybase.table', + "google.cloud.happybase.table", _filter_chain_helper=mock_filter_chain_helper, _columns_filter_helper=mock_columns_filter_helper, ) @@ -192,24 +192,24 @@ def mock_filter_chain_helper(**kwargs): result = table.row(row_key, columns=columns) self.assertEqual(result, {}) read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) + read_row_kwargs = {"filter_": fake_filter} + self.assertEqual( + table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] + ) self.assertEqual(mock_columns, [(columns,)]) expected_kwargs = { - 'filters': [fake_col_filter], - 'versions': 1, - 'timestamp': None, + "filters": [fake_col_filter], + "versions": 1, + "timestamp": None, } self.assertEqual(mock_filters, [expected_kwargs]) def test_row_with_results(self): from google.cloud.bigtable.row_data import PartialRowData - row_key = 'row-key' - name = 
'table-name' + row_key = "row-key" + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -231,13 +231,13 @@ def mock_cells_to_pairs(*args, **kwargs): mock_cells.append((args, kwargs)) return [fake_pair] - col_fam = u'cf1' - qual = b'qual' + col_fam = u"cf1" + qual = b"qual" fake_cells = object() partial_row._cells = {col_fam: {qual: fake_cells}} include_timestamp = object() patch = mock.patch.multiple( - 'google.cloud.happybase.table', + "google.cloud.happybase.table", _filter_chain_helper=mock_filter_chain_helper, _cells_to_pairs=mock_cells_to_pairs, ) @@ -245,27 +245,22 @@ def mock_cells_to_pairs(*args, **kwargs): result = table.row(row_key, include_timestamp=include_timestamp) # The results come from _cells_to_pairs. - expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair} + expected_result = {col_fam.encode("ascii") + b":" + qual: fake_pair} self.assertEqual(result, expected_result) read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) + read_row_kwargs = {"filter_": fake_filter} + self.assertEqual( + table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] + ) - expected_kwargs = { - 'filters': [], - 'versions': 1, - 'timestamp': None, - } + expected_kwargs = {"filters": [], "versions": 1, "timestamp": None} self.assertEqual(mock_filters, [expected_kwargs]) - to_pairs_kwargs = {'include_timestamp': include_timestamp} - self.assertEqual(mock_cells, - [((fake_cells,), to_pairs_kwargs)]) + to_pairs_kwargs = {"include_timestamp": include_timestamp} + self.assertEqual(mock_cells, [((fake_cells,), to_pairs_kwargs)]) def test_rows_empty_row(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) @@ -273,7 +268,7 @@ def test_rows_empty_row(self): self.assertEqual(result, []) def test_rows_with_columns(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -290,8 +285,7 @@ def mock_columns_filter_helper(*args): fake_row_set = object() - def mock_get_row_set_from_rows( - *args): # pylint: disable=unused-argument + def mock_get_row_set_from_rows(*args): # pylint: disable=unused-argument return fake_row_set fake_filter = object() @@ -301,10 +295,10 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - rows = ['row-key'] + rows = ["row-key"] columns = object() patch = mock.patch.multiple( - 'google.cloud.happybase.table', + "google.cloud.happybase.table", _filter_chain_helper=mock_filter_chain_helper, _columns_filter_helper=mock_columns_filter_helper, _get_row_set_from_rows=mock_get_row_set_from_rows, @@ -316,28 +310,27 @@ def mock_filter_chain_helper(**kwargs): self.assertEqual(result, []) read_rows_args = () - read_rows_kwargs = {'row_set': fake_row_set, - 'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_rows_calls, [ - (read_rows_args, read_rows_kwargs), - ]) + read_rows_kwargs = {"row_set": fake_row_set, "filter_": fake_filter} + self.assertEqual( + table._low_level_table.read_rows_calls, [(read_rows_args, read_rows_kwargs)] + ) self.assertEqual(mock_cols, [(columns,)]) expected_kwargs = { - 'filters': [fake_col_filter], - 'versions': 1, - 'timestamp': None, + "filters": [fake_col_filter], + "versions": 1, + "timestamp": None, } 
self.assertEqual(mock_filters, [expected_kwargs]) def test_rows_with_results(self): from google.cloud.bigtable.row_data import PartialRowData - row_key1 = 'row-key1' - row_key2 = 'row-key2' + row_key1 = "row-key1" + row_key2 = "row-key2" rows = [row_key1, row_key2] - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -350,8 +343,7 @@ def test_rows_with_results(self): # Set-up mocks. fake_row_set = object() - def mock_get_row_set_from_rows( - *args): # pylint: disable=unused-argument + def mock_get_row_set_from_rows(*args): # pylint: disable=unused-argument return fake_row_set fake_filter = object() @@ -368,13 +360,13 @@ def mock_cells_to_pairs(*args, **kwargs): mock_cells.append((args, kwargs)) return [fake_pair] - col_fam = u'cf1' - qual = b'qual' + col_fam = u"cf1" + qual = b"qual" fake_cells = object() row1._cells = {col_fam: {qual: fake_cells}} include_timestamp = object() patch = mock.patch.multiple( - 'google.cloud.happybase.table', + "google.cloud.happybase.table", _filter_chain_helper=mock_filter_chain_helper, _get_row_set_from_rows=mock_get_row_set_from_rows, _cells_to_pairs=mock_cells_to_pairs, @@ -383,28 +375,22 @@ def mock_cells_to_pairs(*args, **kwargs): result = table.rows(rows, include_timestamp=include_timestamp) # read_rows_result == PartialRowsData with row_key1 - expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair} + expected_result = {col_fam.encode("ascii") + b":" + qual: fake_pair} self.assertEqual(result, [(row_key1, expected_result)]) read_rows_args = () - read_rows_kwargs = {'row_set': fake_row_set, - 'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_rows_calls, [ - (read_rows_args, read_rows_kwargs), - ]) + read_rows_kwargs = {"row_set": fake_row_set, "filter_": fake_filter} + self.assertEqual( + table._low_level_table.read_rows_calls, [(read_rows_args, read_rows_kwargs)] + ) - expected_kwargs = { - 'filters': [], - 'versions': 1, - 'timestamp': None, - } + expected_kwargs = {"filters": [], "versions": 1, "timestamp": None} self.assertEqual(mock_filters, [expected_kwargs]) - to_pairs_kwargs = {'include_timestamp': include_timestamp} - self.assertEqual(mock_cells, - [((fake_cells,), to_pairs_kwargs)]) + to_pairs_kwargs = {"include_timestamp": include_timestamp} + self.assertEqual(mock_cells, [((fake_cells,), to_pairs_kwargs)]) def test_cells_empty_row(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -418,33 +404,31 @@ def mock_filter_chain_helper(**kwargs): mock_filters.append(kwargs) return fake_filter - row_key = 'row-key' - column = 'fam:col1' - with mock.patch('google.cloud.happybase.table._filter_chain_helper', - mock_filter_chain_helper): + row_key = "row-key" + column = "fam:col1" + with mock.patch( + "google.cloud.happybase.table._filter_chain_helper", + mock_filter_chain_helper, + ): result = table.cells(row_key, column) # read_row_result == None --> No results. 
self.assertEqual(result, []) read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) + read_row_kwargs = {"filter_": fake_filter} + self.assertEqual( + table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] + ) - expected_kwargs = { - 'column': column, - 'versions': None, - 'timestamp': None, - } + expected_kwargs = {"column": column, "versions": None, "timestamp": None} self.assertEqual(mock_filters, [expected_kwargs]) def test_cells_with_results(self): from google.cloud.bigtable.row_data import PartialRowData - row_key = 'row-key' - name = 'table-name' + row_key = "row-key" + name = "table-name" connection = None table = self._make_one(name, connection) table._low_level_table = _MockLowLevelTable() @@ -471,42 +455,41 @@ def mock_cells_to_pairs(*args, **kwargs): mock_cells.append((args, kwargs)) return fake_result - col_fam = 'cf1' - qual = 'qual' + col_fam = "cf1" + qual = "qual" fake_cells = object() - partial_row._cells = {col_fam: {qual.encode('utf-8'): fake_cells}} - column = (col_fam + ':' + qual).encode('utf-8') + partial_row._cells = {col_fam: {qual.encode("utf-8"): fake_cells}} + column = (col_fam + ":" + qual).encode("utf-8") patch = mock.patch.multiple( - 'google.cloud.happybase.table', + "google.cloud.happybase.table", _filter_chain_helper=mock_filter_chain_helper, _cells_to_pairs=mock_cells_to_pairs, ) with patch: - result = table.cells(row_key, column, versions=versions, - timestamp=timestamp, - include_timestamp=include_timestamp) + result = table.cells( + row_key, + column, + versions=versions, + timestamp=timestamp, + include_timestamp=include_timestamp, + ) self.assertEqual(result, fake_result) read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) - - filter_kwargs = { - 'column': column, - 'versions': versions, - 'timestamp': timestamp, - } + read_row_kwargs = {"filter_": fake_filter} + self.assertEqual( + table._low_level_table.read_row_calls, [(read_row_args, read_row_kwargs)] + ) + + filter_kwargs = {"column": column, "versions": versions, "timestamp": timestamp} self.assertEqual(mock_filters, [filter_kwargs]) - to_pairs_kwargs = {'include_timestamp': include_timestamp} - self.assertEqual(mock_cells, - [((fake_cells,), to_pairs_kwargs)]) + to_pairs_kwargs = {"include_timestamp": include_timestamp} + self.assertEqual(mock_cells, [((fake_cells,), to_pairs_kwargs)]) def test_scan_with_batch_size(self): import warnings - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) # Use unknown to force a TypeError, so we don't need to @@ -516,12 +499,12 @@ def test_scan_with_batch_size(self): list(table.scan(batch_size=object(), unknown=None)) self.assertEqual(len(warned), 1) - self.assertIn('batch_size', str(warned[0])) + self.assertIn("batch_size", str(warned[0])) def test_scan_with_scan_batching(self): import warnings - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) # Use unknown to force a TypeError, so we don't need to @@ -531,12 +514,12 @@ def test_scan_with_scan_batching(self): list(table.scan(scan_batching=object(), unknown=None)) self.assertEqual(len(warned), 1) - self.assertIn('scan_batching', str(warned[0])) + self.assertIn("scan_batching", str(warned[0])) def test_scan_with_sorted_columns(self): import warnings - 
name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) # Use unknown to force a TypeError, so we don't need to @@ -546,37 +529,45 @@ def test_scan_with_sorted_columns(self): list(table.scan(sorted_columns=object(), unknown=None)) self.assertEqual(len(warned), 1) - self.assertIn('sorted_columns', str(warned[0])) + self.assertIn("sorted_columns", str(warned[0])) def test_scan_with_invalid_limit(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) with self.assertRaises(ValueError): list(table.scan(limit=-10)) def test_scan_with_row_prefix_and_row_start(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) with self.assertRaises(ValueError): - list(table.scan(row_prefix='a', row_stop='abc')) + list(table.scan(row_prefix="a", row_stop="abc")) def test_scan_with_string_filter(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) with self.assertRaises(TypeError): - list(table.scan(filter='some-string')) - - def _scan_test_helper(self, row_limits=(None, None), row_prefix=None, - columns=None, filter_=None, timestamp=None, - include_timestamp=False, limit=None, rr_result=None, - expected_result=None): + list(table.scan(filter="some-string")) + + def _scan_test_helper( + self, + row_limits=(None, None), + row_prefix=None, + columns=None, + filter_=None, + timestamp=None, + include_timestamp=False, + limit=None, + rr_result=None, + expected_result=None, + ): import types from google.cloud.happybase.table import _string_successor - name = 'table-name' + name = "table-name" row_start, row_stop = row_limits connection = None table = self._make_one(name, connection) @@ -606,17 +597,22 @@ def mock_get_row_set_object(*args): # pylint: disable=unused-argument return fake_row_set patch = mock.patch.multiple( - 'google.cloud.happybase.table', + "google.cloud.happybase.table", _filter_chain_helper=mock_filter_chain_helper, _columns_filter_helper=mock_columns_filter_helper, _get_row_set_object=mock_get_row_set_object, ) with patch: - result = table.scan(row_start=row_start, row_stop=row_stop, - row_prefix=row_prefix, columns=columns, - filter=filter_, timestamp=timestamp, - include_timestamp=include_timestamp, - limit=limit) + result = table.scan( + row_start=row_start, + row_stop=row_stop, + row_prefix=row_prefix, + columns=columns, + filter=filter_, + timestamp=timestamp, + include_timestamp=include_timestamp, + limit=limit, + ) self.assertTrue(isinstance(result, types.GeneratorType)) # Need to consume the result while the monkey patch is applied. # read_rows_result == Empty PartialRowsData --> No results. 
@@ -629,14 +625,14 @@ def mock_get_row_set_object(*args): # pylint: disable=unused-argument row_stop = _string_successor(row_prefix) read_rows_kwargs = { - 'row_set': fake_row_set, - 'filter_': fake_filter, - 'limit': limit, + "row_set": fake_row_set, + "filter_": fake_filter, + "limit": limit, } - self.assertEqual(table._low_level_table.read_rows_calls, [ - (read_rows_args, read_rows_kwargs), - ]) + self.assertEqual( + table._low_level_table.read_rows_calls, [(read_rows_args, read_rows_kwargs)] + ) if columns is not None: self.assertEqual(mock_columns, [(columns,)]) @@ -648,11 +644,7 @@ def mock_get_row_set_object(*args): # pylint: disable=unused-argument filters.append(filter_) if columns: filters.append(fake_col_filter) - expected_kwargs = { - 'filters': filters, - 'versions': 1, - 'timestamp': timestamp, - } + expected_kwargs = {"filters": filters, "versions": 1, "timestamp": timestamp} self.assertEqual(mock_filters, [expected_kwargs]) def test_scan_with_columns(self): @@ -660,13 +652,13 @@ def test_scan_with_columns(self): self._scan_test_helper(columns=columns) def test_scan_with_row_start_and_stop(self): - row_start = 'bar' - row_stop = 'foo' + row_start = "bar" + row_stop = "foo" row_limits = (row_start, row_stop) self._scan_test_helper(row_limits=row_limits) def test_scan_with_row_prefix(self): - row_prefix = 'row-prefi' + row_prefix = "row-prefi" self._scan_test_helper(row_prefix=row_prefix) def test_scan_with_filter(self): @@ -681,20 +673,22 @@ def test_scan_with_no_results(self): def test_scan_with_results(self): from google.cloud.bigtable.row_data import PartialRowData - row_key1 = 'row-key1' + row_key1 = "row-key1" row1 = PartialRowData(row_key1) rr_result = _MockPartialRowsData(rows={row_key1: row1}, iterations=1) include_timestamp = object() expected_result = [(row_key1, {})] - self._scan_test_helper(include_timestamp=include_timestamp, - rr_result=rr_result, - expected_result=expected_result) + self._scan_test_helper( + include_timestamp=include_timestamp, + rr_result=rr_result, + expected_result=expected_result, + ) def test_put(self): from google.cloud.happybase.table import _WAL_SENTINEL - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) batches_created = [] @@ -704,11 +698,10 @@ def make_batch(*args, **kwargs): batches_created.append(result) return result - row = 'row-key' - data = {'fam:col': 'foo'} + row = "row-key" + data = {"fam:col": "foo"} timestamp = None - with mock.patch('google.cloud.happybase.table.Batch', - make_batch): + with mock.patch("google.cloud.happybase.table.Batch", make_batch): result = table.put(row, data, timestamp=timestamp) # There is no return value. 
@@ -719,10 +712,10 @@ def make_batch(*args, **kwargs): self.assertTrue(isinstance(batch, _MockBatch)) self.assertEqual(batch.args, (table,)) expected_kwargs = { - 'timestamp': timestamp, - 'batch_size': None, - 'transaction': False, - 'wal': _WAL_SENTINEL, + "timestamp": timestamp, + "batch_size": None, + "transaction": False, + "wal": _WAL_SENTINEL, } self.assertEqual(batch.kwargs, expected_kwargs) # Make sure it was a successful context manager @@ -733,7 +726,7 @@ def make_batch(*args, **kwargs): def test_delete(self): from google.cloud.happybase.table import _WAL_SENTINEL - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) batches_created = [] @@ -743,11 +736,10 @@ def make_batch(*args, **kwargs): batches_created.append(result) return result - row = 'row-key' - columns = ['fam:col1', 'fam:col2'] + row = "row-key" + columns = ["fam:col1", "fam:col2"] timestamp = None - with mock.patch('google.cloud.happybase.table.Batch', - make_batch): + with mock.patch("google.cloud.happybase.table.Batch", make_batch): result = table.delete(row, columns=columns, timestamp=timestamp) # There is no return value. @@ -758,10 +750,10 @@ def make_batch(*args, **kwargs): self.assertTrue(isinstance(batch, _MockBatch)) self.assertEqual(batch.args, (table,)) expected_kwargs = { - 'timestamp': timestamp, - 'batch_size': None, - 'transaction': False, - 'wal': _WAL_SENTINEL, + "timestamp": timestamp, + "batch_size": None, + "transaction": False, + "wal": _WAL_SENTINEL, } self.assertEqual(batch.kwargs, expected_kwargs) # Make sure it was a successful context manager @@ -770,7 +762,7 @@ def make_batch(*args, **kwargs): self.assertEqual(batch.delete_args, [(row, columns)]) def test_batch(self): - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) @@ -779,18 +771,21 @@ def test_batch(self): transaction = False # Must be False when batch_size is non-null wal = object() - with mock.patch('google.cloud.happybase.table.Batch', - _MockBatch): - result = table.batch(timestamp=timestamp, batch_size=batch_size, - transaction=transaction, wal=wal) + with mock.patch("google.cloud.happybase.table.Batch", _MockBatch): + result = table.batch( + timestamp=timestamp, + batch_size=batch_size, + transaction=transaction, + wal=wal, + ) self.assertTrue(isinstance(result, _MockBatch)) self.assertEqual(result.args, (table,)) expected_kwargs = { - 'timestamp': timestamp, - 'batch_size': batch_size, - 'transaction': transaction, - 'wal': wal, + "timestamp": timestamp, + "batch_size": batch_size, + "transaction": transaction, + "wal": wal, } self.assertEqual(result.kwargs, expected_kwargs) @@ -808,12 +803,12 @@ def counter_inc(self, row, column, value=1): self.value += value return self.value - name = 'table-name' + name = "table-name" connection = None table = TableWithInc(name, connection) - row = 'row-key' - column = 'fam:col1' + row = "row-key" + column = "fam:col1" self.assertEqual(TableWithInc.incremented, []) result = table.counter_get(row, column) self.assertEqual(result, counter_value) @@ -833,12 +828,12 @@ def counter_inc(self, row, column, value=1): self.value += value return self.value - name = 'table-name' + name = "table-name" connection = None table = TableWithInc(name, connection) - row = 'row-key' - column = 'fam:col1' + row = "row-key" + column = "fam:col1" dec_value = 987 self.assertEqual(TableWithInc.incremented, []) result = table.counter_dec(row, column, value=dec_value) @@ -848,13 +843,14 @@ def counter_inc(self, row, 
column, value=1): def _counter_inc_helper(self, row, column, value, commit_result): import six - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) # Mock the return values. table._low_level_table = _MockLowLevelTable() table._low_level_table.row_values[row] = row_obj = _MockLowLevelRow( - row, commit_result=commit_result) + row, commit_result=commit_result + ) self.assertFalse(row_obj._append) result = table.counter_inc(row, column, value=value) @@ -866,15 +862,14 @@ def _counter_inc_helper(self, row, column, value, commit_result): # Check the row values returned. row_obj = table._low_level_table.row_values[row] if isinstance(column, six.binary_type): - column = column.decode('utf-8') - self.assertEqual(row_obj.counts, - {tuple(column.split(':')): incremented_value}) + column = column.decode("utf-8") + self.assertEqual(row_obj.counts, {tuple(column.split(":")): incremented_value}) def test_counter_set(self): import struct from google.cloud.happybase.table import _WAL_SENTINEL - name = 'table-name' + name = "table-name" connection = None table = self._make_one(name, connection) batches_created = [] @@ -884,11 +879,10 @@ def make_batch(*args, **kwargs): batches_created.append(result) return result - row = 'row-key' - column = 'fam:col1' + row = "row-key" + column = "fam:col1" value = 42 - with mock.patch('google.cloud.happybase.table.Batch', - make_batch): + with mock.patch("google.cloud.happybase.table.Batch", make_batch): result = table.counter_set(row, column, value=value) # There is no return value. @@ -899,87 +893,81 @@ def make_batch(*args, **kwargs): self.assertTrue(isinstance(batch, _MockBatch)) self.assertEqual(batch.args, (table,)) expected_kwargs = { - 'timestamp': None, - 'batch_size': None, - 'transaction': False, - 'wal': _WAL_SENTINEL, + "timestamp": None, + "batch_size": None, + "transaction": False, + "wal": _WAL_SENTINEL, } self.assertEqual(batch.kwargs, expected_kwargs) # Make sure it was a successful context manager self.assertEqual(batch.exit_vals, [(None, None, None)]) - data = {column: struct.Struct('>q').pack(value)} + data = {column: struct.Struct(">q").pack(value)} self.assertEqual(batch.put_args, [(row, data)]) self.assertEqual(batch.delete_args, []) def test_counter_inc(self): import struct - row = 'row-key' - col_fam = u'fam' - col_qual = u'col1' - column = col_fam + u':' + col_qual + row = "row-key" + col_fam = u"fam" + col_qual = u"col1" + column = col_fam + u":" + col_qual value = 42 - packed_value = struct.pack('>q', value) + packed_value = struct.pack(">q", value) fake_timestamp = None - commit_result = { - col_fam: { - col_qual.encode(): [(packed_value, fake_timestamp)], - } - } + commit_result = {col_fam: {col_qual.encode(): [(packed_value, fake_timestamp)]}} self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_column_bytes(self): import struct - row = 'row-key' - col_fam = b'fam' - col_qual = b'col1' - column = col_fam + b':' + col_qual + row = "row-key" + col_fam = b"fam" + col_qual = b"col1" + column = col_fam + b":" + col_qual value = 42 - packed_value = struct.pack('>q', value) + packed_value = struct.pack(">q", value) fake_timestamp = None commit_result = { - col_fam.decode('utf-8'): { - col_qual: [(packed_value, fake_timestamp)], - } + col_fam.decode("utf-8"): {col_qual: [(packed_value, fake_timestamp)]} } self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_bad_result(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = 
col_fam + ':' + col_qual + row = "row-key" + col_fam = "fam" + col_qual = "col1" + column = col_fam + ":" + col_qual value = 42 commit_result = None with self.assertRaises(TypeError): self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_result_key_error(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = col_fam + ':' + col_qual + row = "row-key" + col_fam = "fam" + col_qual = "col1" + column = col_fam + ":" + col_qual value = 42 commit_result = {} with self.assertRaises(KeyError): self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_result_nested_key_error(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = col_fam + ':' + col_qual + row = "row-key" + col_fam = "fam" + col_qual = "col1" + column = col_fam + ":" + col_qual value = 42 commit_result = {col_fam: {}} with self.assertRaises(KeyError): self._counter_inc_helper(row, column, value, commit_result) def test_counter_inc_result_non_unique_cell(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = col_fam + ':' + col_qual + row = "row-key" + col_fam = "fam" + col_qual = "col1" + column = col_fam + ":" + col_qual value = 42 fake_timestamp = None packed_value = None @@ -988,7 +976,7 @@ def test_counter_inc_result_non_unique_cell(self): col_qual.encode(): [ (packed_value, fake_timestamp), (packed_value, fake_timestamp), - ], + ] } } with self.assertRaises(ValueError): @@ -996,9 +984,9 @@ def test_counter_inc_result_non_unique_cell(self): class Test__gc_rule_to_dict(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _gc_rule_to_dict + return _gc_rule_to_dict(*args, **kwargs) def test_with_null(self): @@ -1012,7 +1000,7 @@ def test_with_max_versions(self): max_versions = 2 gc_rule = MaxVersionsGCRule(max_versions) result = self._call_fut(gc_rule) - expected_result = {'max_versions': max_versions} + expected_result = {"max_versions": max_versions} self.assertEqual(result, expected_result) def test_with_max_age(self): @@ -1023,7 +1011,7 @@ def test_with_max_age(self): max_age = datetime.timedelta(seconds=time_to_live) gc_rule = MaxAgeGCRule(max_age) result = self._call_fut(gc_rule) - expected_result = {'time_to_live': time_to_live} + expected_result = {"time_to_live": time_to_live} self.assertEqual(result, expected_result) def test_with_non_gc_rule(self): @@ -1068,10 +1056,7 @@ def test_with_intersection_two_rules(self): rule2 = MaxVersionsGCRule(max_versions) gc_rule = GCRuleIntersection(rules=[rule1, rule2]) result = self._call_fut(gc_rule) - expected_result = { - 'max_versions': max_versions, - 'time_to_live': time_to_live, - } + expected_result = {"max_versions": max_versions, "time_to_live": time_to_live} self.assertEqual(result, expected_result) def test_with_intersection_two_nested_rules(self): @@ -1085,32 +1070,32 @@ def test_with_intersection_two_nested_rules(self): class Test__string_successor(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _string_successor + return _string_successor(*args, **kwargs) def test_with_alphanumeric(self): - self.assertEqual(self._call_fut(b'boa'), b'bob') - self.assertEqual(self._call_fut(b'abc1'), b'abc2') + self.assertEqual(self._call_fut(b"boa"), b"bob") + self.assertEqual(self._call_fut(b"abc1"), b"abc2") def test_with_last_byte(self): - self.assertEqual(self._call_fut(b'boa\xff'), b'bob') + self.assertEqual(self._call_fut(b"boa\xff"), b"bob") def test_with_empty_string(self): - 
self.assertEqual(self._call_fut(b''), b'') + self.assertEqual(self._call_fut(b""), b"") def test_with_all_last_bytes(self): - self.assertEqual(self._call_fut(b'\xff\xff\xff'), b'') + self.assertEqual(self._call_fut(b"\xff\xff\xff"), b"") def test_with_unicode_input(self): - self.assertEqual(self._call_fut(u'boa'), b'bob') + self.assertEqual(self._call_fut(u"boa"), b"bob") class Test__convert_to_time_range(unittest.TestCase): - def _call_fut(self, timestamp=None): from google.cloud.happybase.table import _convert_to_time_range + return _convert_to_time_range(timestamp=timestamp) def test_null(self): @@ -1136,17 +1121,17 @@ def test_success(self): class Test__cells_to_pairs(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _cells_to_pairs + return _cells_to_pairs(*args, **kwargs) def test_without_timestamp(self): from google.cloud.bigtable.row_data import Cell - value1 = 'foo' + value1 = "foo" cell1 = Cell(value=value1, timestamp_micros=None) - value2 = 'bar' + value2 = "bar" cell2 = Cell(value=value2, timestamp_micros=None) result = self._call_fut([cell1, cell2]) @@ -1155,73 +1140,70 @@ def test_without_timestamp(self): def test_with_timestamp(self): from google.cloud.bigtable.row_data import Cell - value1 = 'foo' + value1 = "foo" ts1_millis = 1221934570148 ts1 = ts1_millis * 1000 cell1 = Cell(value=value1, timestamp_micros=ts1) - value2 = 'bar' + value2 = "bar" ts2_millis = 1221955575548 ts2 = ts2_millis * 1000 cell2 = Cell(value=value2, timestamp_micros=ts2) result = self._call_fut([cell1, cell2], include_timestamp=True) - self.assertEqual(result, - [(value1, ts1_millis), (value2, ts2_millis)]) + self.assertEqual(result, [(value1, ts1_millis), (value2, ts2_millis)]) class Test__partial_row_to_dict(unittest.TestCase): - def _call_fut(self, partial_row_data, include_timestamp=False): from google.cloud.happybase.table import _partial_row_to_dict - return _partial_row_to_dict(partial_row_data, - include_timestamp=include_timestamp) + + return _partial_row_to_dict( + partial_row_data, include_timestamp=include_timestamp + ) def test_without_timestamp(self): from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData - row_data = PartialRowData(b'row-key') - val1 = b'hi-im-bytes' - val2 = b'bi-im-hytes' - row_data._cells[u'fam1'] = { - b'col1': [Cell(val1, None)], - b'col2': [Cell(val2, None)], + row_data = PartialRowData(b"row-key") + val1 = b"hi-im-bytes" + val2 = b"bi-im-hytes" + row_data._cells[u"fam1"] = { + b"col1": [Cell(val1, None)], + b"col2": [Cell(val2, None)], } result = self._call_fut(row_data) - expected_result = { - b'fam1:col1': val1, - b'fam1:col2': val2, - } + expected_result = {b"fam1:col1": val1, b"fam1:col2": val2} self.assertEqual(result, expected_result) def test_with_timestamp(self): from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData - row_data = PartialRowData(b'row-key') - val1 = b'hi-im-bytes' + row_data = PartialRowData(b"row-key") + val1 = b"hi-im-bytes" ts1_millis = 1221934570148 ts1 = ts1_millis * 1000 - val2 = b'bi-im-hytes' + val2 = b"bi-im-hytes" ts2_millis = 1331934880000 ts2 = ts2_millis * 1000 - row_data._cells[u'fam1'] = { - b'col1': [Cell(val1, ts1)], - b'col2': [Cell(val2, ts2)], + row_data._cells[u"fam1"] = { + b"col1": [Cell(val1, ts1)], + b"col2": [Cell(val2, ts2)], } result = self._call_fut(row_data, include_timestamp=True) expected_result = { - b'fam1:col1': (val1, ts1_millis), - b'fam1:col2': (val2, 
ts2_millis), + b"fam1:col1": (val1, ts1_millis), + b"fam1:col2": (val2, ts2_millis), } self.assertEqual(result, expected_result) class Test__filter_chain_helper(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _filter_chain_helper + return _filter_chain_helper(*args, **kwargs) def test_no_filters(self): @@ -1252,19 +1234,25 @@ def test_existing_filters(self): # only have one value set. self.assertEqual(result.num_cells, versions) - def _column_helper(self, num_filters, versions=None, timestamp=None, - column=None, col_fam=None, qual=None): - from google.cloud.bigtable.row_filters import ( - ColumnQualifierRegexFilter) + def _column_helper( + self, + num_filters, + versions=None, + timestamp=None, + column=None, + col_fam=None, + qual=None, + ): + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter from google.cloud.bigtable.row_filters import FamilyNameRegexFilter from google.cloud.bigtable.row_filters import RowFilterChain if col_fam is None: - col_fam = 'cf1' + col_fam = "cf1" if qual is None: - qual = 'qual' + qual = "qual" if column is None: - column = col_fam + ':' + qual + column = col_fam + ":" + qual result = self._call_fut(column, versions=versions, timestamp=timestamp) self.assertTrue(isinstance(result, RowFilterChain)) @@ -1276,8 +1264,8 @@ def _column_helper(self, num_filters, versions=None, timestamp=None, # Relies on the fact that RowFilter instances can # only have one value set. - self.assertEqual(fam_filter.regex, col_fam.encode('utf-8')) - self.assertEqual(qual_filter.regex, qual.encode('utf-8')) + self.assertEqual(fam_filter.regex, col_fam.encode("utf-8")) + self.assertEqual(qual_filter.regex, qual.encode("utf-8")) return result @@ -1285,12 +1273,14 @@ def test_column_only(self): self._column_helper(num_filters=2) def test_column_bytes(self): - self._column_helper(num_filters=2, column=b'cfB:qualY', - col_fam=u'cfB', qual=u'qualY') + self._column_helper( + num_filters=2, column=b"cfB:qualY", col_fam=u"cfB", qual=u"qualY" + ) def test_column_unicode(self): - self._column_helper(num_filters=2, column=u'cfU:qualN', - col_fam=u'cfU', qual=u'qualN') + self._column_helper( + num_filters=2, column=u"cfU:qualN", col_fam=u"cfU", qual=u"qualN" + ) def test_with_versions(self): from google.cloud.bigtable.row_filters import CellsColumnLimitFilter @@ -1325,14 +1315,13 @@ def test_with_timestamp(self): def test_with_all_options(self): versions = 11 timestamp = 1441928298571 - self._column_helper(num_filters=4, versions=versions, - timestamp=timestamp) + self._column_helper(num_filters=4, versions=versions, timestamp=timestamp) class Test__columns_filter_helper(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _columns_filter_helper + return _columns_filter_helper(*args, **kwargs) def test_no_columns(self): @@ -1343,23 +1332,22 @@ def test_no_columns(self): def test_single_column(self): from google.cloud.bigtable.row_filters import FamilyNameRegexFilter - col_fam = 'cf1' + col_fam = "cf1" columns = [col_fam] result = self._call_fut(columns) expected_result = FamilyNameRegexFilter(col_fam) self.assertEqual(result, expected_result) def test_column_and_column_families(self): - from google.cloud.bigtable.row_filters import ( - ColumnQualifierRegexFilter) + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter from google.cloud.bigtable.row_filters import FamilyNameRegexFilter from google.cloud.bigtable.row_filters import RowFilterChain from 
google.cloud.bigtable.row_filters import RowFilterUnion - col_fam1 = 'cf1' - col_fam2 = 'cf2' - col_qual2 = 'qual2' - columns = [col_fam1, col_fam2 + ':' + col_qual2] + col_fam1 = "cf1" + col_fam2 = "cf2" + col_qual2 = "qual2" + columns = [col_fam1, col_fam2 + ":" + col_qual2] result = self._call_fut(columns) self.assertTrue(isinstance(result, RowFilterUnion)) @@ -1368,20 +1356,20 @@ def test_column_and_column_families(self): filter2 = result.filters[1] self.assertTrue(isinstance(filter1, FamilyNameRegexFilter)) - self.assertEqual(filter1.regex, col_fam1.encode('utf-8')) + self.assertEqual(filter1.regex, col_fam1.encode("utf-8")) self.assertTrue(isinstance(filter2, RowFilterChain)) filter2a, filter2b = filter2.filters self.assertTrue(isinstance(filter2a, FamilyNameRegexFilter)) - self.assertEqual(filter2a.regex, col_fam2.encode('utf-8')) + self.assertEqual(filter2a.regex, col_fam2.encode("utf-8")) self.assertTrue(isinstance(filter2b, ColumnQualifierRegexFilter)) - self.assertEqual(filter2b.regex, col_qual2.encode('utf-8')) + self.assertEqual(filter2b.regex, col_qual2.encode("utf-8")) class Test___get_row_set_object(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _get_row_set_object + return _get_row_set_object(*args, **kwargs) def test_row_set_object(self): @@ -1395,15 +1383,15 @@ def test_row_set_object(self): class Test___get_row_set_from_rows(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.happybase.table import _get_row_set_from_rows + return _get_row_set_from_rows(*args, **kwargs) def test_row_set_object(self): from google.cloud.bigtable.row_set import RowSet - rows = ['row_key1', 'row_key2'] + rows = ["row_key1", "row_key2"] row_set = self._call_fut(rows) self.assertIsInstance(row_set, RowSet) @@ -1411,20 +1399,17 @@ def test_row_set_object(self): class _Connection(object): - def __init__(self, instance): self._instance = instance class _MockLowLevelColumnFamily(object): - def __init__(self, column_family_id, gc_rule=None): self.column_family_id = column_family_id self.gc_rule = gc_rule class _MockLowLevelTable(object): - def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs @@ -1468,8 +1453,7 @@ def __init__(self, row_key, commit_result=None): self.commit_result = commit_result def increment_cell_value(self, column_family_id, column, int_value): - count = self.counts.setdefault((column_family_id, column), - self.COUNTER_DEFAULT) + count = self.counts.setdefault((column_family_id, column), self.COUNTER_DEFAULT) self.counts[(column_family_id, column)] = count + int_value def commit(self): @@ -1477,7 +1461,6 @@ def commit(self): class _MockBatch(object): - def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs @@ -1499,7 +1482,6 @@ def delete(self, *args): class _MockPartialRowsData(object): - def __init__(self, rows=None, iterations=0): self.rows = rows or {} self.consume_all_calls = 0