Don’t cache skipped tokens
SimonSapin committed Jul 21, 2017
1 parent 336a12e commit 999c0fc
Showing 1 changed file with 45 additions and 40 deletions.
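
In outline, the change below makes the three public `next*` methods on `Parser` share one private helper, `next_common(skip_whitespace, skip_comments)`, whose loop consumes whitespace and comment tokens without writing them into `ParserInput`'s single-token cache; only the token actually returned is cached, and the now-unused `cached_token_ref` helper is removed. The following is a self-contained model of that pattern, using stand-in types rather than the crate's own, to show the intended cache behavior:

```rust
// A self-contained model of the pattern in this commit (stand-in types, not
// the crate's real `Parser`): a one-entry token cache keyed by the token's
// start position, where tokens the caller asked to skip are consumed but
// never written to the cache.

#[derive(Clone, Debug, PartialEq)]
enum Tok {
    WhiteSpace,
    Comment,
    Ident(&'static str),
}

struct Cached {
    token: Tok,
    start: usize,
}

struct Stream {
    tokens: Vec<Tok>,       // stand-in for the tokenizer's input
    next: usize,            // stand-in for the tokenizer position
    cached: Option<Cached>, // the single-token cache
    lexed: usize,           // counts (re-)tokenizations, for demonstration
}

impl Stream {
    fn next_common(&mut self, skip_whitespace: bool, skip_comments: bool) -> Option<Tok> {
        loop {
            let start = self.next;
            let token = match &self.cached {
                // Cache hit: reuse the token without re-lexing it.
                Some(c) if c.start == start => c.token.clone(),
                _ => {
                    let t = self.tokens.get(start)?.clone();
                    self.lexed += 1; // simulate the cost of tokenizing
                    t
                }
            };
            self.next = start + 1;
            let keep = match token {
                Tok::WhiteSpace if skip_whitespace => false,
                Tok::Comment if skip_comments => false,
                _ => true,
            };
            if keep {
                // Cache only the token we are about to return.
                self.cached = Some(Cached { token: token.clone(), start });
                return Some(token);
            }
            // A skipped token falls through uncached; before this commit it
            // would have overwritten `cached` here.
        }
    }
}

fn main() {
    let mut s = Stream {
        tokens: vec![Tok::WhiteSpace, Tok::Comment, Tok::Ident("a")],
        next: 0,
        cached: None,
        lexed: 0,
    };
    assert_eq!(s.next_common(true, true), Some(Tok::Ident("a")));
    assert_eq!(s.lexed, 3); // first pass lexes all three tokens
    s.next = 0;             // rewind, as `Parser::reset` would
    assert_eq!(s.next_common(true, true), Some(Tok::Ident("a")));
    assert_eq!(s.lexed, 5); // only the two skipped tokens are re-lexed; `a` hits the cache
}
```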
src/parser.rs: 45 additions & 40 deletions
```diff
@@ -80,11 +80,6 @@ impl<'i> ParserInput<'i> {
             cached_token: None,
         }
     }
-
-    #[inline]
-    fn cached_token_ref(&self) -> &Token<'i> {
-        &self.cached_token.as_ref().unwrap().token
-    }
 }
 
 /// A CSS parser that borrows its `&str` input,
```
```diff
@@ -332,27 +327,15 @@ impl<'i: 't, 't> Parser<'i, 't> {
     /// See the `Parser::parse_nested_block` method to parse the content of functions or blocks.
     ///
     /// This only returns a closing token when it is unmatched (and therefore an error).
+    #[inline]
     pub fn next(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
-        loop {
-            match self.next_including_whitespace_and_comments() {
-                Err(e) => return Err(e),
-                Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) => {},
-                _ => break
-            }
-        }
-        Ok(self.input.cached_token_ref())
+        self.next_common(true, true)
     }
 
     /// Same as `Parser::next`, but does not skip whitespace tokens.
+    #[inline]
     pub fn next_including_whitespace(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
-        loop {
-            match self.next_including_whitespace_and_comments() {
-                Err(e) => return Err(e),
-                Ok(&Token::Comment(_)) => {},
-                _ => break
-            }
-        }
-        Ok(self.input.cached_token_ref())
+        self.next_common(false, true)
     }
 
     /// Same as `Parser::next`, but does not skip whitespace or comment tokens.
```
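
Taken together, the flag mapping is: `next` calls `next_common(true, true)`, `next_including_whitespace` calls `next_common(false, true)`, and (in the next hunk) `next_including_whitespace_and_comments` calls `next_common(false, false)`. A hedged usage sketch of the three entry points (`ParserInput` and `Parser` are the crate's public API; the input string and assertions are illustrative):

```rust
use cssparser::{Parser, ParserInput, Token};

fn main() {
    // "a /* note */ b" lexes as: Ident, WhiteSpace, Comment, WhiteSpace, Ident.
    let css = "a /* note */ b";

    // next() -> next_common(true, true): whitespace and comments are skipped.
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    assert!(matches!(parser.next(), Ok(&Token::Ident(_)))); // `a`
    assert!(matches!(parser.next(), Ok(&Token::Ident(_)))); // straight to `b`

    // next_including_whitespace() -> next_common(false, true): only comments skipped.
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    assert!(matches!(parser.next_including_whitespace(), Ok(&Token::Ident(_))));
    assert!(matches!(parser.next_including_whitespace(), Ok(&Token::WhiteSpace(_))));

    // next_including_whitespace_and_comments() -> next_common(false, false):
    // every token, including the comment, is returned.
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    parser.next_including_whitespace_and_comments().unwrap(); // `a`
    parser.next_including_whitespace_and_comments().unwrap(); // whitespace
    assert!(matches!(parser.next_including_whitespace_and_comments(),
                     Ok(&Token::Comment(_))));
}
```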
```diff
@@ -361,37 +344,59 @@ impl<'i: 't, 't> Parser<'i, 't> {
     /// where comments are preserved.
     /// When parsing higher-level values, per the CSS Syntax specification,
     /// comments should always be ignored between tokens.
+    #[inline]
     pub fn next_including_whitespace_and_comments(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
+        self.next_common(false, false)
+    }
+
+    fn next_common(&mut self, skip_whitespace: bool, skip_comments: bool)
+                   -> Result<&Token<'i>, BasicParseError<'i>> {
+        let return_this_token = |token: &Token| {
+            match *token {
+                Token::WhiteSpace(_) if skip_whitespace => false,
+                Token::Comment(_) if skip_comments => false,
+                _ => true
+            }
+        };
+
         if let Some(block_type) = self.at_start_of.take() {
             consume_until_end_of_block(block_type, &mut self.input.tokenizer);
         }
 
-        let byte = self.input.tokenizer.next_byte();
-        if self.stop_before.contains(Delimiters::from_byte(byte)) {
-            return Err(BasicParseError::EndOfInput)
-        }
-
-        let token_start_position = self.input.tokenizer.position();
-        let token;
-        match self.input.cached_token {
-            Some(ref cached_token) if cached_token.start_position == token_start_position => {
-                self.input.tokenizer.reset(cached_token.end_position);
-                token = &cached_token.token
-            }
-            _ => {
-                let new_token = self.input.tokenizer.next().map_err(|()| BasicParseError::EndOfInput)?;
-                self.input.cached_token = Some(CachedToken {
-                    token: new_token,
-                    start_position: token_start_position,
-                    end_position: self.input.tokenizer.position(),
-                });
-                token = self.input.cached_token_ref()
-            }
-        }
+        loop {
+            let byte = self.input.tokenizer.next_byte();
+            if self.stop_before.contains(Delimiters::from_byte(byte)) {
+                return Err(BasicParseError::EndOfInput)
+            }
+
+            let token_start_position = self.input.tokenizer.position();
+            match self.input.cached_token {
+                Some(ref cached_token) if cached_token.start_position == token_start_position => {
+                    self.input.tokenizer.reset(cached_token.end_position);
+                    if return_this_token(&cached_token.token) {
+                        break
+                    }
+                }
+                _ => {
+                    let new_token = self.input.tokenizer.next().map_err(|()| BasicParseError::EndOfInput)?;
+                    if return_this_token(&new_token) {
+                        self.input.cached_token = Some(CachedToken {
+                            token: new_token,
+                            start_position: token_start_position,
+                            end_position: self.input.tokenizer.position(),
+                        });
+                        break
+                    }
+                }
+            }
+        }
+
+        let token = &self.input.cached_token.as_ref().unwrap().token;
 
         if let Some(block_type) = BlockType::opening(token) {
             self.at_start_of = Some(block_type);
         }
 
         Ok(token)
     }
```
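
Presumably the point of the change: the cache holds a single entry keyed by start position, so when the old code cached every consumed token, each skipped whitespace or comment overwrote the entry for the token being returned, and a rewind-and-reparse pass could never get a cache hit. A sketch of that access pattern, assuming the `position()`/`reset(SourcePosition)` API the crate had at the time of this commit (newer releases rewind with `Parser::state()` and `reset(&state)` instead):

```rust
use cssparser::{Parser, ParserInput};

fn main() {
    let mut input = ParserInput::new("  /* hint */  10px");
    let mut parser = Parser::new(&mut input);

    let start = parser.position();              // remember where we are
    let first = parser.next().unwrap().clone(); // skips to the `10px` dimension token
    parser.reset(start);                        // rewind, as a failed parse alternative would
    let second = parser.next().unwrap().clone();
    assert_eq!(first, second);

    // Before this commit: the whitespace and comment tokens each overwrote the
    // single-token cache on the way to `10px`, so the second `next()` re-lexed
    // all four tokens. After: only `10px` is cached, keyed by its start
    // position, so the second pass re-lexes just the skipped tokens and reuses
    // the cached `10px`.
}
```

Either way the result is identical; the change only affects how much work the tokenizer repeats when a caller backtracks.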

