Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(issues): Don't treat %s as unique #75090

Draft
wants to merge 3 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -576,6 +576,7 @@ module = [
"tests.sentry.api.endpoints.issues.*",
"tests.sentry.event_manager.test_event_manager",
"tests.sentry.grouping.test_fingerprinting",
"tests.sentry.grouping.test_normalize_message",
"tests.sentry.hybridcloud.*",
"tests.sentry.issues",
"tests.sentry.issues.endpoints",
Expand Down
10 changes: 5 additions & 5 deletions src/sentry/grouping/parameterization.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,11 +240,11 @@ def num_tokens_from_string(token_str: str) -> int:
def is_probably_uniq_id(token_str: str) -> bool:
token_str = token_str.strip("\"'[]{}():;")
if len(token_str) < _UniqueId.TOKEN_LENGTH_MINIMUM:
return False
if (
token_str[0] == "<" and token_str[-1] == ">"
): # Don't replace already-parameterized tokens
return False
return False # Don't replace short tokens
if token_str[0] == "<" and token_str[-1] == ">":
return False # Don't replace already-parameterized tokens
if "%s" in token_str:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a reason not to add %d in here?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Mostly a combination of the difficulty in testing this and I want to limit the amount of additional work we have to do on each token_str until we have a better way to do better performance testing as well

return False # Don't replace already-parameterized log messages
token_length_ratio = _UniqueId.num_tokens_from_string(token_str) / len(token_str)
if (
len(token_str) > _UniqueId.TOKEN_LENGTH_LONG
Expand Down
9 changes: 7 additions & 2 deletions tests/sentry/grouping/test_normalize_message.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,14 +170,19 @@
"""[401,""]""",
"""[<int>,""]""",
),
(
"Uniq ID - log messages ignored properly",
"Hard time limit (%ss) exceeded for %s[%s]",
"Hard time limit (%ss) exceeded for %s[%s]",
),
(
"Uniq ID - no change",
"""Blocked 'script' from 'wasm-eval:'""",
"""Blocked 'script' from 'wasm-eval:'""",
),
],
)
def test_normalize_message(name, input, expected):
def test_normalize_message(name: str, input: str, expected: str) -> None:
event = Event(project_id=1, event_id="something")
with override_options(
{
Expand Down Expand Up @@ -210,7 +215,7 @@ def test_normalize_message(name, input, expected):
),
],
)
def test_fail_to_normalize_message(name, input, expected):
def test_fail_to_normalize_message(name: str, input: str, expected: str) -> None:
event = Event(project_id=1, event_id="something")
with override_options(
{
Expand Down
Loading