http2: deflake TestServer_Rejects_Too_Many_Streams

This CL is the x/net counterpart to CL 755320.

This test contains a race condition in the server handler:

	inHandler <- streamID
	<-leaveHandler

We assume that all requests queue reading from leaveHandler in order,
but it is possible for the second request (stream id 3) to arrive at
leaveHandler before the first (stream id 1).

We could fix the race with a judicious synctest.Wait, but instead we
rewrite the test to use serverHandlerCall to manage server handlers,
which lets us precisely choose which request to unblock.

Fixes #69670

Change-Id: I9507d1dba07f7d62bcdc6c9bb67c47466a6a6964
Reviewed-on: https://go-review.googlesource.com/c/net/+/755081
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Nicholas Husin <husin@google.com>
Reviewed-by: Nicholas Husin <nsh@golang.org>
Reviewed-by: Nicholas Husin <husin@google.com>
This commit is contained in:
Damien Neil
2026-03-13 14:49:14 -07:00
committed by Gopher Robot
parent af2121a04e
commit 10ac4db832

View File

@@ -2416,22 +2416,7 @@ func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
synctestTest(t, testServer_Rejects_Too_Many_Streams)
}
func testServer_Rejects_Too_Many_Streams(t testing.TB) {
inHandler := make(chan uint32)
leaveHandler := make(chan bool)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
var streamID uint32
if _, err := fmt.Sscanf(r.URL.Path, "/%d", &streamID); err != nil {
t.Errorf("parsing %q: %v", r.URL.Path, err)
}
inHandler <- streamID
<-leaveHandler
})
defer st.Close()
// Automatically syncing after every write / before every read
// slows this test down substantially.
st.cc.(*synctestNetConn).autoWait = false
st := newServerTester(t, nil)
st.greet()
nextStreamID := uint32(1)
streamID := func() uint32 {
@@ -2448,15 +2433,11 @@ func testServer_Rejects_Too_Many_Streams(t testing.TB) {
EndHeaders: true,
})
}
var calls []*serverHandlerCall
for i := 0; i < DefaultMaxStreams; i++ {
sendReq(streamID())
<-inHandler
calls = append(calls, st.nextHandlerCall())
}
defer func() {
for i := 0; i < DefaultMaxStreams; i++ {
leaveHandler <- true
}
}()
// And this one should cross the limit:
// (It's also sent as a CONTINUATION, to verify we still track the decoder context,
@@ -2477,7 +2458,7 @@ func testServer_Rejects_Too_Many_Streams(t testing.TB) {
st.wantRSTStream(rejectID, ErrCodeProtocol)
// But let a handler finish:
leaveHandler <- true
calls[0].exit()
st.sync()
st.wantHeaders(wantHeader{
streamID: 1,
@@ -2487,8 +2468,9 @@ func testServer_Rejects_Too_Many_Streams(t testing.TB) {
// And now another stream should be able to start:
goodID := streamID()
sendReq(goodID)
if got := <-inHandler; got != goodID {
t.Errorf("Got stream %d; want %d", got, goodID)
call := st.nextHandlerCall()
if got, want := call.req.URL.Path, fmt.Sprintf("/%d", goodID); got != want {
t.Errorf("Got request for %q, want %q", got, want)
}
}