pythongh-119118: Fix performance regression in tokenize module (python#119615)

* pythongh-119118: Fix performance regression in tokenize module

- Cache the line object so that a new Unicode object is not created
  for every token on the same line.
- Speed up byte-offset-to-column-offset conversion by scanning the
  smallest possible byte span, as sketched below.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
2 people authored and noahbkim committed Jul 11, 2024
1 parent 1652b47 commit e530584
Showing 4 changed files with 68 additions and 4 deletions.
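
Before the diff itself, a minimal self-contained sketch of the byte-offset-to-column-offset walk described above (illustrative only, not part of the commit; the helper name count_chars and the sample line are invented for the example):

#include <stdio.h>

/* Count characters between two byte offsets of a valid UTF-8 buffer by
   stepping over lead bytes, the same walk the commit adds as
   _PyPegen_byte_offset_to_character_offset_line(). */
static long count_chars(const char *data, long start, long end)
{
    long len = 0;
    while (start < end) {
        unsigned char ch = (unsigned char)data[start];
        if (ch < 0x80)                start += 1;  /* ASCII */
        else if ((ch & 0xe0) == 0xc0) start += 2;  /* 2-byte sequence */
        else if ((ch & 0xf0) == 0xe0) start += 3;  /* 3-byte sequence */
        else if ((ch & 0xf8) == 0xf0) start += 4;  /* 4-byte sequence */
        else                          return -1;   /* not a lead byte */
        len++;
    }
    return len;
}

int main(void)
{
    /* "αα = 1" is 8 bytes but 6 columns, since each α is 2 bytes. */
    const char *line = "\xce\xb1\xce\xb1 = 1";
    printf("%ld\n", count_chars(line, 0, 8));  /* prints 6 */
    return 0;
}

Because only the token's own byte span is scanned, each conversion costs time proportional to the token rather than to the whole line, which is what removes the regression on very long lines.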
2 changes: 2 additions & 0 deletions (Misc/NEWS.d entry)
@@ -0,0 +1,2 @@
+Fix performance regression in the :mod:`tokenize` module by caching the ``line``
+token attribute and calculating the column offset more efficiently.
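
To make the column arithmetic concrete (an editorial example, not part of the change): on the line "αα = 1", where each α occupies two bytes, the NAME token "αα" spans bytes 0-4 but columns 0-2, so the iterator records a byte-to-column difference of 2; the next token "=" starts at byte offset 5 and is placed at column 5 - 2 = 3 by a single subtraction, with no rescan of the line prefix.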
25 changes: 25 additions & 0 deletions Parser/pegen.c
@@ -18,6 +18,31 @@ _PyPegen_interactive_exit(Parser *p)
     return NULL;
 }

+Py_ssize_t
+_PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
+{
+    const char *data = PyUnicode_AsUTF8(line);
+
+    Py_ssize_t len = 0;
+    while (col_offset < end_col_offset) {
+        Py_UCS4 ch = data[col_offset];
+        if (ch < 0x80) {
+            col_offset += 1;
+        } else if ((ch & 0xe0) == 0xc0) {
+            col_offset += 2;
+        } else if ((ch & 0xf0) == 0xe0) {
+            col_offset += 3;
+        } else if ((ch & 0xf8) == 0xf0) {
+            col_offset += 4;
+        } else {
+            PyErr_SetString(PyExc_ValueError, "Invalid UTF-8 sequence");
+            return -1;
+        }
+        len++;
+    }
+    return len;
+}
+
 Py_ssize_t
 _PyPegen_byte_offset_to_character_offset_raw(const char* str, Py_ssize_t col_offset)
 {
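
Editorial note on the helper above: it assumes both offsets land on UTF-8 sequence boundaries, which is always true for token boundaries; a continuation byte (0b10xxxxxx) matches none of the lead-byte patterns and falls through to the ValueError path.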
1 change: 1 addition & 0 deletions Parser/pegen.h
@@ -148,6 +148,7 @@ int _PyPegen_fill_token(Parser *p);
 expr_ty _PyPegen_name_token(Parser *p);
 expr_ty _PyPegen_number_token(Parser *p);
 void *_PyPegen_string_token(Parser *p);
+Py_ssize_t _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset);
 Py_ssize_t _PyPegen_byte_offset_to_character_offset(PyObject *line, Py_ssize_t col_offset);
 Py_ssize_t _PyPegen_byte_offset_to_character_offset_raw(const char*, Py_ssize_t col_offset);

44 changes: 40 additions & 4 deletions Python/Python-tokenize.c
@@ -32,6 +32,11 @@ typedef struct
 {
     PyObject_HEAD struct tok_state *tok;
     int done;
+
+    /* Needed to cache line for performance */
+    PyObject *last_line;
+    Py_ssize_t last_lineno;
+    Py_ssize_t byte_col_offset_diff;
 } tokenizeriterobject;

 /*[clinic input]
@@ -68,6 +73,11 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
         self->tok->tok_extra_tokens = 1;
     }
     self->done = 0;
+
+    self->last_line = NULL;
+    self->byte_col_offset_diff = 0;
+    self->last_lineno = 0;
+
     return (PyObject *)self;
 }

@@ -210,7 +220,18 @@ tokenizeriter_next(tokenizeriterobject *it)
         if (size >= 1 && it->tok->implicit_newline) {
             size -= 1;
         }
-        line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+
+        if (it->tok->lineno != it->last_lineno) {
+            // Line has changed since last token, so we fetch the new line and cache it
+            // in the iter object.
+            Py_XDECREF(it->last_line);
+            line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+            it->last_line = line;
+            it->byte_col_offset_diff = 0;
+        } else {
+            // Line hasn't changed so we reuse the cached one.
+            line = it->last_line;
+        }
     }
     if (line == NULL) {
         Py_DECREF(str);
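
Ownership note (editorial): between tokens the iterator holds the only strong reference to the cached line, so the Py_XDECREF above releases the previous line exactly when the tokenizer moves to a new one; handing the cached object to result tuples without giving up that reference is what motivates the Py_BuildValue format change at the end of this file.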
@@ -219,13 +240,28 @@ tokenizeriter_next(tokenizeriterobject *it)

     Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
     Py_ssize_t end_lineno = it->tok->lineno;
+    it->last_lineno = lineno;
+
     Py_ssize_t col_offset = -1;
     Py_ssize_t end_col_offset = -1;
+    Py_ssize_t byte_offset = -1;
     if (token.start != NULL && token.start >= line_start) {
-        col_offset = _PyPegen_byte_offset_to_character_offset(line, token.start - line_start);
+        byte_offset = token.start - line_start;
+        col_offset = byte_offset - it->byte_col_offset_diff;
     }
     if (token.end != NULL && token.end >= it->tok->line_start) {
-        end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, token.end - it->tok->line_start);
+        Py_ssize_t end_byte_offset = token.end - it->tok->line_start;
+        if (lineno == end_lineno) {
+            // If the whole token is at the same line, we can just use the token.start
+            // buffer for figuring out the new column offset, since using line is not
+            // performant for very long lines.
+            Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset);
+            end_col_offset = col_offset + token_col_offset;
+            it->byte_col_offset_diff += token.end - token.start - token_col_offset;
+        } else {
+            end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset);
+            it->byte_col_offset_diff += end_byte_offset - end_col_offset;
+        }
     }

     if (it->tok->tok_extra_tokens) {
@@ -262,7 +298,7 @@ tokenizeriter_next(tokenizeriterobject *it)
         }
     }

-    result = Py_BuildValue("(iN(nn)(nn)N)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
+    result = Py_BuildValue("(iN(nn)(nn)O)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
 exit:
     _PyToken_Free(&token);
     if (type == ENDMARKER) {
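
Why "N" becomes "O" in the hunk above (editorial, not from the commit message): "N" steals a reference to line, which was correct while each token got a freshly decoded line object, but it would repeatedly give away the iterator's single cached reference now that line is reused across tokens; "O" makes the result tuple take a new reference of its own and leaves it->last_line valid.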
