Skip to content

Commit

Permalink
Standardized BDD Logging
Browse files Browse the repository at this point in the history
Change print() statements to bdd_log(), defined in bdd_test_util.py
This change provides a manner for keeping all log statements in the
same format. Currently this format is simply the current time
followed by the specified message.

Also added the copyright header to sdk_impl.py and removed unused
imports.

Change-Id: I87590bbcd6512eefcce46dc9e7f854597c203b17
Signed-off-by: Julian Carrivick <cjulian@au1.ibm.com>
  • Loading branch information
juliancarrivick-ibm committed Sep 6, 2016
1 parent 0f959c0 commit a757dbe
Show file tree
Hide file tree
Showing 7 changed files with 84 additions and 93 deletions.
16 changes: 8 additions & 8 deletions bddtests/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import os
import glob

from steps.bdd_test_util import cli_call
from steps.bdd_test_util import cli_call, bdd_log

from steps.coverage import saveCoverageFiles, createCoverageAggregate

Expand All @@ -20,14 +20,14 @@ def getDockerComposeFileArgsFromYamlFile(compose_yaml):
def after_scenario(context, scenario):
get_logs = context.config.userdata.get("logs", "N")
if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context):
print("Scenario {0} failed. Getting container logs".format(scenario.name))
bdd_log("Scenario {0} failed. Getting container logs".format(scenario.name))
file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
# get logs from the peer containers
for containerData in context.compose_containers:
with open(containerData.containerName + file_suffix, "w+") as logfile:
sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile)
if sys_rc !=0 :
print("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName,sys_rc))
bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName,sys_rc))
# get logs from the chaincode containers
cc_output, cc_error, cc_returncode = \
cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True)
Expand All @@ -36,15 +36,15 @@ def after_scenario(context, scenario):
with open(namePart + file_suffix, "w+") as logfile:
sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile)
if sys_rc !=0 :
print("Cannot get logs for {0}. Docker rc = {1}".format(namepart,sys_rc))
bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(namepart,sys_rc))
if 'doNotDecompose' in scenario.tags:
if 'compose_yaml' in context:
print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
bdd_log("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
else:
if 'compose_yaml' in context:
fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)

print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
bdd_log("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(["docker-compose"] + fileArgsToDockerCompose + ["unpause"], expect_success=True)
context.compose_output, context.compose_error, context.compose_returncode = \
Expand All @@ -63,7 +63,7 @@ def after_scenario(context, scenario):
if context.compose_returncode == 0:
# Remove each container
for containerId in context.compose_output.splitlines():
#print("docker rm {0}".format(containerId))
#bdd_log("docker rm {0}".format(containerId))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(["docker", "rm", "-f", containerId], expect_success=True)

Expand All @@ -73,7 +73,7 @@ def before_all(context):

# stop any running peer that could get in the way before starting the tests
def after_all(context):
print("context.failed = {0}".format(context.failed))
bdd_log("context.failed = {0}".format(context.failed))

if coverageEnabled(context):
createCoverageAggregate()
3 changes: 2 additions & 1 deletion bddtests/steps/bdd_grpc_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import chaincode_pb2

import bdd_test_util
from bdd_test_util import bdd_log

from grpc.beta import implementations

Expand All @@ -46,7 +47,7 @@ def getTxResult(context, enrollId):

def getGRPCChannel(ipAddress):
channel = implementations.insecure_channel(ipAddress, 7051)
print("Returning GRPC for address: {0}".format(ipAddress))
bdd_log("Returning GRPC for address: {0}".format(ipAddress))
return channel

def getGRPCChannelAndUser(context, enrollId):
Expand Down
4 changes: 2 additions & 2 deletions bddtests/steps/bdd_test_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,9 +34,9 @@ def cli_call(arg_list, expect_success=True):
output, error = p.communicate()
if p.returncode != 0:
if output is not None:
print("Output:\n" + output)
bdd_log("Output:\n" + output)
if error is not None:
print("Error Message:\n" + error)
bdd_log("Error Message:\n" + error)
if expect_success:
raise subprocess.CalledProcessError(p.returncode, arg_list, output)
return output, error, p.returncode
Expand Down
Loading

0 comments on commit a757dbe

Please sign in to comment.