chore(misc): Spelling #19678

Merged · 23 commits · Jan 25, 2023
Changes from all commits
2 changes: 1 addition & 1 deletion RELEASING/Dockerfile.from_local_tarball
@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \

RUN apt-get update -y

-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
RUN apt-get install -y apt-transport-https apt-utils

# Install superset dependencies
2 changes: 1 addition & 1 deletion RELEASING/Dockerfile.from_svn_tarball
@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \

RUN apt-get update -y

-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
RUN apt-get install -y apt-transport-https apt-utils

# Install superset dependencies
12 changes: 6 additions & 6 deletions docs/static/resources/openapi.json
@@ -746,7 +746,7 @@
"type": "array"
},
"metrics": {
"description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
"description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
"items": {},
"nullable": true,
"type": "array"
@@ -1309,7 +1309,7 @@
"type": "boolean"
},
"metrics": {
"description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
"description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
"items": {},
"nullable": true,
"type": "array"
@@ -1968,7 +1968,7 @@
"type": "string"
},
"query_context_generation": {
"description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
"description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
"nullable": true,
"type": "boolean"
},
@@ -2075,7 +2075,7 @@
"type": "string"
},
"query_context_generation": {
"description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
"description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
"nullable": true,
"type": "boolean"
},
@@ -2760,7 +2760,7 @@
"type": "string"
},
"query_context_generation": {
"description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
"description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
"nullable": true,
"type": "boolean"
},
@@ -2867,7 +2867,7 @@
"type": "string"
},
"query_context_generation": {
"description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
"description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
"nullable": true,
"type": "boolean"
},
2 changes: 1 addition & 1 deletion scripts/cancel_github_workflows.py
@@ -143,7 +143,7 @@ def print_commit(commit: Dict[str, Any], branch: str) -> None:
"--include-last/--skip-last",
default=False,
show_default=True,
help="Whether to also cancel the lastest run.",
help="Whether to also cancel the latest run.",
)
@click.option(
"--include-running/--skip-running",
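For readers unfamiliar with the `--include-last/--skip-last` syntax in the hunk above: click treats a slash-separated pair as a single boolean option with explicit on and off flags. A minimal standalone sketch of that pattern (the command name and echo body are illustrative, not part of the script):

```python
import click

@click.command()
@click.option(
    "--include-last/--skip-last",
    default=False,
    show_default=True,
    help="Whether to also cancel the latest run.",
)
def cancel(include_last: bool) -> None:
    # click derives the parameter name from the first ("on") flag, so
    # --include-last sets include_last=True and --skip-last sets it False.
    click.echo(f"include_last={include_last}")

if __name__ == "__main__":
    cancel()
```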
12 changes: 6 additions & 6 deletions scripts/permissions_cleanup.py
@@ -24,7 +24,7 @@ def cleanup_permissions() -> None:
pvms = security_manager.get_session.query(
security_manager.permissionview_model
).all()
print("# of permission view menues is: {}".format(len(pvms)))
print("# of permission view menus is: {}".format(len(pvms)))
pvms_dict = defaultdict(list)
for pvm in pvms:
pvms_dict[(pvm.permission, pvm.view_menu)].append(pvm)
@@ -43,9 +43,9 @@ def cleanup_permissions() -> None:
pvms = security_manager.get_session.query(
security_manager.permissionview_model
).all()
print("Stage 1: # of permission view menues is: {}".format(len(pvms)))
print("Stage 1: # of permission view menus is: {}".format(len(pvms)))

-# 2. Clean up None permissions or view menues
+# 2. Clean up None permissions or view menus
pvms = security_manager.get_session.query(
security_manager.permissionview_model
).all()
@@ -57,15 +57,15 @@ def cleanup_permissions() -> None:
pvms = security_manager.get_session.query(
security_manager.permissionview_model
).all()
print("Stage 2: # of permission view menues is: {}".format(len(pvms)))
print("Stage 2: # of permission view menus is: {}".format(len(pvms)))

-# 3. Delete empty permission view menues from roles
+# 3. Delete empty permission view menus from roles
roles = security_manager.get_session.query(security_manager.role_model).all()
for role in roles:
role.permissions = [p for p in role.permissions if p]
security_manager.get_session.commit()

-# 4. Delete empty roles from permission view menues
+# 4. Delete empty roles from permission view menus
pvms = security_manager.get_session.query(
security_manager.permissionview_model
).all()
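The prints renamed above sit inside a dedup routine: permission-view entries are grouped by `(permission, view_menu)` and everything after the first entry in each group is deleted. A minimal sketch of that grouping pattern, assuming `pvms` is any list of objects with `permission` and `view_menu` attributes (the helper name is illustrative):

```python
from collections import defaultdict

def find_duplicate_pvms(pvms):
    # Group permission-view entries by their natural key; all but the
    # first entry in each group are duplicates slated for deletion.
    groups = defaultdict(list)
    for pvm in pvms:
        groups[(pvm.permission, pvm.view_menu)].append(pvm)
    return [dup for group in groups.values() for dup in group[1:]]
```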
2 changes: 1 addition & 1 deletion scripts/python_tests.sh
@@ -19,7 +19,7 @@
set -e

# Temporary fix, probably related with https://bugs.launchpad.net/ubuntu/+source/opencv/+bug/1890170
-# MySQL was failling with:
+# MySQL was failing with:
# from . import _mysql
# ImportError: /lib/x86_64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block
export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6
2 changes: 1 addition & 1 deletion scripts/tests/run.sh
@@ -24,7 +24,7 @@ set -e
#
function reset_db() {
echo --------------------
-echo Reseting test DB
+echo Resetting test DB
echo --------------------
docker-compose stop superset-tests-worker superset || true
RESET_DB_CMD="psql \"postgresql://${DB_USER}:${DB_PASSWORD}@127.0.0.1:5432\" <<-EOF
2 changes: 1 addition & 1 deletion superset-websocket/spec/index.test.ts
@@ -98,7 +98,7 @@ describe('server', () => {
expect(endMock).toHaveBeenLastCalledWith('OK');
});

-test('reponds with a 404 when not found', () => {
+test('responds with a 404 when not found', () => {
const endMock = jest.fn();
const writeHeadMock = jest.fn();

2 changes: 1 addition & 1 deletion superset-websocket/utils/client-ws-app/views/index.pug
@@ -24,7 +24,7 @@ block content

div Sockets connected:
span#socket-count 0
-div Messages recevied:
+div Messages received:
span#message-count 0
div Last message received:
code#message-debug
4 changes: 2 additions & 2 deletions superset/utils/core.py
@@ -1026,7 +1026,7 @@ def send_mime_email(
smtp_password = config["SMTP_PASSWORD"]
smtp_starttls = config["SMTP_STARTTLS"]
smtp_ssl = config["SMTP_SSL"]
-smpt_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]
+smtp_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]

if dryrun:
logger.info("Dryrun enabled, email notification content is below:")
@@ -1035,7 +1035,7 @@

# Default ssl context is SERVER_AUTH using the default system
# root CA certificates
-ssl_context = ssl.create_default_context() if smpt_ssl_server_auth else None
+ssl_context = ssl.create_default_context() if smtp_ssl_server_auth else None
smtp = (
smtplib.SMTP_SSL(smtp_host, smtp_port, context=ssl_context)
if smtp_ssl
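For context on the `smpt_ssl_server_auth` → `smtp_ssl_server_auth` rename: the flag decides whether the SMTP connection verifies the server's certificate. A minimal sketch of the selection logic shown in the hunk, with the helper name and signature invented for illustration (Superset reads these values from its `SMTP_*` config keys):

```python
import smtplib
import ssl

def connect_smtp(host: str, port: int, use_ssl: bool, ssl_server_auth: bool):
    # ssl.create_default_context() defaults to Purpose.SERVER_AUTH: it loads
    # the system root CAs and verifies the server certificate and hostname.
    # Passing context=None skips that explicit verification setup.
    ssl_context = ssl.create_default_context() if ssl_server_auth else None
    if use_ssl:
        return smtplib.SMTP_SSL(host, port, context=ssl_context)
    return smtplib.SMTP(host, port)
```

Keeping the context choice separate from the transport choice mirrors the structure above, where `SMTP_SSL_SERVER_AUTH` toggles verification independently of `SMTP_SSL`.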
2 changes: 1 addition & 1 deletion tests/integration_tests/charts/data/api_tests.py
@@ -451,7 +451,7 @@ def test_with_filter_suppose_to_return_empty_data__no_data_returned(self):

def test_with_invalid_where_parameter__400(self):
self.query_context_payload["queries"][0]["filters"] = []
-# erroneus WHERE-clause
+# erroneous WHERE-clause
self.query_context_payload["queries"][0]["extras"]["where"] = "(gender abc def)"

rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
2 changes: 1 addition & 1 deletion tests/integration_tests/core_tests.py
@@ -1619,7 +1619,7 @@ def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):
Handle injected exceptions from the db mutator
"""

-# Assert we can handle a custom excetion at the mutator level
+# Assert we can handle a custom exception at the mutator level
exception = SupersetException("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
2 changes: 1 addition & 1 deletion tests/integration_tests/csv_upload_tests.py
@@ -209,7 +209,7 @@ def mock_upload_to_s3(filename: str, upload_prefix: str, table: Table) -> str:
container.exec_run(f"hdfs dfs -mkdir -p {dest_dir}")
dest = os.path.join(dest_dir, os.path.basename(filename))
container.exec_run(f"hdfs dfs -put {src} {dest}")
-# hive external table expectes a directory for the location
+# hive external table expects a directory for the location
return dest_dir


2 changes: 1 addition & 1 deletion tests/integration_tests/datasets/api_tests.py
@@ -1810,7 +1810,7 @@ def test_export_dataset_gamma(self):
"datasource_access", dataset.perm
)

-# add perissions to allow export + access to query this dataset
+# add permissions to allow export + access to query this dataset
gamma_role = security_manager.find_role("Gamma")
security_manager.add_permission_role(gamma_role, perm1)
security_manager.add_permission_role(gamma_role, perm2)
2 changes: 1 addition & 1 deletion tests/integration_tests/datasets/commands_tests.py
@@ -72,7 +72,7 @@ def test_export_dataset_command(self, mock_g):

metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])

-# sort columns for deterministc comparison
+# sort columns for deterministic comparison
metadata["columns"] = sorted(metadata["columns"], key=itemgetter("column_name"))
metadata["metrics"] = sorted(metadata["metrics"], key=itemgetter("metric_name"))

2 changes: 1 addition & 1 deletion tests/integration_tests/datasource_tests.py
@@ -233,7 +233,7 @@ def test_external_metadata_for_malicious_virtual_table(self):
resp = self.get_json_resp(url)
self.assertEqual(resp["error"], "Only `SELECT` statements are allowed")

-def test_external_metadata_for_mutistatement_virtual_table(self):
+def test_external_metadata_for_multistatement_virtual_table(self):
self.login(username="admin")
table = SqlaTable(
table_name="multistatement_sql_table",
6 changes: 3 additions & 3 deletions tests/integration_tests/model_tests.py
@@ -476,15 +476,15 @@ def test_query_with_expr_groupby_timeseries(self):
# TODO(bkyryliuk): make it work for presto.
return

-def cannonicalize_df(df):
+def canonicalize_df(df):
ret = df.sort_values(by=list(df.columns.values), inplace=False)
ret.reset_index(inplace=True, drop=True)
return ret

df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
-name_list1 = cannonicalize_df(df1).name.values.tolist()
+name_list1 = canonicalize_df(df1).name.values.tolist()
df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
-name_list2 = cannonicalize_df(df1).name.values.tolist()
+name_list2 = canonicalize_df(df1).name.values.tolist()
self.assertFalse(df2.empty)

assert name_list2 == name_list1
2 changes: 1 addition & 1 deletion tests/integration_tests/query_context_tests.py
@@ -74,7 +74,7 @@ def test_schema_deserialization(self):
for query_idx, query in enumerate(query_context.queries):
payload_query = payload["queries"][query_idx]

-# check basic properies
+# check basic properties
self.assertEqual(query.extras, payload_query["extras"])
self.assertEqual(query.filter, payload_query["filters"])
self.assertEqual(query.columns, payload_query["columns"])
2 changes: 1 addition & 1 deletion tests/integration_tests/reports/api_tests.py
@@ -571,7 +571,7 @@ def test_get_list_report_schedule_filter_type(self):
@pytest.mark.usefixtures("create_report_schedules")
def test_get_related_report_schedule(self):
"""
-ReportSchedule Api: Test get releated report schedule
+ReportSchedule Api: Test get related report schedule
"""
self.login(username="admin")
related_columns = ["created_by", "chart", "dashboard", "database"]
2 changes: 1 addition & 1 deletion tests/integration_tests/sqllab_tests.py
@@ -91,7 +91,7 @@ def test_sql_json(self):
data = self.run_sql("SELECT * FROM birth_names LIMIT 10", "1")
self.assertLess(0, len(data["data"]))

data = self.run_sql("SELECT * FROM unexistant_table", "2")
data = self.run_sql("SELECT * FROM nonexistent_table", "2")
if backend() == "presto":
assert (
data["errors"][0]["error_type"]
4 changes: 2 additions & 2 deletions tests/unit_tests/charts/test_post_processing.py
@@ -64,7 +64,7 @@ def test_pivot_df_no_cols_no_rows_single_metric():
""".strip()
)

-# tranpose_pivot and combine_metrics do nothing in this case
+# transpose_pivot and combine_metrics do nothing in this case
pivoted = pivot_df(
df,
rows=[],
@@ -169,7 +169,7 @@ def test_pivot_df_no_cols_no_rows_two_metrics():
""".strip()
)

-# tranpose_pivot and combine_metrics do nothing in this case
+# transpose_pivot and combine_metrics do nothing in this case
pivoted = pivot_df(
df,
rows=[],
4 changes: 2 additions & 2 deletions tests/unit_tests/db_engine_specs/test_snowflake.py
@@ -77,11 +77,11 @@ def test_extract_errors() -> None:
)
]

msg = "syntax error line 1 at position 10 unexpected 'limmmited'."
msg = "syntax error line 1 at position 10 unexpected 'limited'."
result = SnowflakeEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
-message='Please check your query for syntax errors at or near "limmmited". Then, try running your query again.',
+message='Please check your query for syntax errors at or near "limited". Then, try running your query again.',
error_type=SupersetErrorType.SYNTAX_ERROR,
level=ErrorLevel.ERROR,
extra={
2 changes: 1 addition & 1 deletion tests/unit_tests/sql_parse_tests.py
@@ -675,7 +675,7 @@ def test_extract_tables_nested_select() -> None:
"""
select (extractvalue(1,concat(0x7e,(select GROUP_CONCAT(COLUMN_NAME)
from INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME="bi_achivement_daily"),0x7e)));
WHERE TABLE_NAME="bi_achievement_daily"),0x7e)));
"""
)
== {Table("COLUMNS", "INFORMATION_SCHEMA")}