From 480e7b06ec3c006363895251ece1bf25d2386ede Mon Sep 17 00:00:00 2001 From: Michael Piatek <piatek@google.com> Date: Thu, 2 Jan 2014 10:51:00 -0800 Subject: [PATCH] go.net/html: Tokenizer.Raw returns the original input when tokenizer errors occur. Two tweaks enable this: 1) Updating the raw and data span pointers when Tokenizer.Next is called, even if an error has occurred. This prevents duplicate data from being returned by Raw in the common case of an EOF. 2) Treating '</>' as an empty comment token to expose the raw text as a tokenization event. (This matches the semantics of other non-token events, e.g., '<!>' is treated as '<!---->'.) Fixes golang/go#7029. R=golang-codereviews, r, bradfitz CC=golang-codereviews https://golang.org/cl/46370043 --- html/token.go | 16 +++++++--------- html/token_test.go | 23 +++++++++++++++++++++-- 2 files changed, 28 insertions(+), 11 deletions(-) diff --git a/html/token.go b/html/token.go index 3a1ee7cc..c43debbc 100644 --- a/html/token.go +++ b/html/token.go @@ -734,7 +734,6 @@ func (z *Tokenizer) readCDATA() bool { brackets = 0 } } - panic("unreachable") } // startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] @@ -934,13 +933,13 @@ func (z *Tokenizer) readTagAttrVal() { // Next scans the next token and returns its type. func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end if z.err != nil { z.tt = ErrorToken return z.tt } - z.raw.start = z.raw.end - z.data.start = z.raw.end - z.data.end = z.raw.end if z.rawTag != "" { if z.rawTag == "plaintext" { // Read everything up to EOF. @@ -1010,12 +1009,11 @@ loop: break loop } if c == '>' { - // "</>" does not generate a token at all. + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. // Reset the tokenizer state and start again. 
- z.raw.start = z.raw.end - z.data.start = z.raw.end - z.data.end = z.raw.end - continue loop + z.tt = CommentToken + return z.tt } if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { z.readTag(false) diff --git a/html/token_test.go b/html/token_test.go index ca408da4..7d54d896 100644 --- a/html/token_test.go +++ b/html/token_test.go @@ -63,12 +63,12 @@ var tokenTests = []tokenTest{ { "not a tag #2", "</>", - "", + "<!---->", }, { "not a tag #3", "a</>b", - "a$b", + "a$<!---->$b", }, { "not a tag #4", @@ -469,6 +469,25 @@ loop: } } +func TestPassthrough(t *testing.T) { + // Accumulating the raw output for each parse event should reconstruct the + // original input. + for _, test := range tokenTests { + z := NewTokenizer(strings.NewReader(test.html)) + var parsed bytes.Buffer + for { + tt := z.Next() + parsed.Write(z.Raw()) + if tt == ErrorToken { + break + } + } + if got, want := parsed.String(), test.html; got != want { + t.Errorf("%s: parsed output:\n got: %q\nwant: %q", test.desc, got, want) + } + } +} + func TestBufAPI(t *testing.T) { s := "0123456789" z := NewTokenizer(bytes.NewBufferString(s))