// completely unresponsive connection.
pendingResets int
+ // readBeforeStreamID is the smallest stream ID that has not been followed by
+ // a frame read from the peer. We use this to determine when a request may
+ // have been sent to a completely unresponsive connection:
+ // If the request ID is less than readBeforeStreamID, then we have had some
+ // indication of life on the connection since sending the request.
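+ // For example, after a request opens stream 5, nextStreamID becomes 7.
+ // Once any frame arrives from the peer, readBeforeStreamID is set to 7,
+ // and 5 < 7 records that the connection showed life after stream 5 was opened.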
+ readBeforeStreamID uint32
+
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
// Lock reqHeaderMu BEFORE mu or wmu.
reqHeaderMu chan struct{}
+ // internalStateHook reports state changes back to the net/http.ClientConn.
+ // Note that this is different from the user state hook registered by
+ // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn,
+ // which calls the user hook.
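+ // The resulting chain is: internalStateHook -> net/http.ClientConn -> user hook.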
+ internalStateHook func()
+
// wmu is held while writing.
// Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
// Only acquire both at the same time when changing peer settings.
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
if t.transportTestHooks != nil {
- return t.newClientConn(nil, singleUse)
+ return t.newClientConn(nil, singleUse, nil)
}
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host))
if err != nil {
return nil, err
}
- return t.newClientConn(tconn, singleUse)
+ return t.newClientConn(tconn, singleUse, nil)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, t.disableKeepAlives())
+ return t.newClientConn(c, t.disableKeepAlives(), nil)
}
-func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) {
conf := configFromTransport(t)
cc := &ClientConn{
t: t,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
lastActive: time.Now(),
+ internalStateHook: internalStateHook,
}
if t.transportTestHooks != nil {
t.transportTestHooks.newclientconn(cc)
maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
}
- st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
- !cc.doNotReuse &&
- int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
- !cc.tooIdleLocked()
+ st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked()
// If this connection has never been used for a request and is closed,
// then let it take a request (which will fail).
return
}
+func (cc *ClientConn) isUsableLocked() bool {
+ return cc.goAway == nil &&
+ !cc.closed &&
+ !cc.closing &&
+ !cc.doNotReuse &&
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
+}
+
+// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn.
+//
+// This follows slightly different rules than clientConnIdleState.canTakeNewRequest.
+// We only permit reservations up to the conn's concurrency limit.
+// This differs from ClientConn.ReserveNewRequest, which permits reservations
+// past the limit when StrictMaxConcurrentStreams is set.
+func (cc *ClientConn) canReserveLocked() bool {
+ if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) {
+ return false
+ }
+ if !cc.isUsableLocked() {
+ return false
+ }
+ return true
+}
+
// currentRequestCountLocked reports the number of concurrency slots currently in use,
// including active streams, reserved slots, and reset streams waiting for acknowledgement.
func (cc *ClientConn) currentRequestCountLocked() int {
return len(cc.streams) + cc.streamsReserved + cc.pendingResets
}
+// availableLocked reports the number of concurrency slots available.
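+// For example, with maxConcurrentStreams == 100 and 97 slots in use
+// (streams, reservations, and pending resets), this reports 3.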
+func (cc *ClientConn) availableLocked() int {
+ if !cc.canTakeNewRequestLocked() {
+ return 0
+ }
+ return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked())
+}
+
// tooIdleLocked reports whether this connection has been sitting idle
// for too much wall time.
func (cc *ClientConn) tooIdleLocked() bool {
t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
defer t.Stop()
cc.tconn.Close()
+ cc.maybeCallStateHook()
}
// A tls.Conn.Close can hang for a long time if the peer is unresponsive.
}
bodyClosed := cs.reqBodyClosed
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
+ // Have we read any frames from the connection since sending this request?
+ readSinceStream := cc.readBeforeStreamID > cs.ID
cc.mu.Unlock()
if mustCloseBody {
cs.reqBody.Close()
//
// This could be due to the server becoming unresponsive.
// To avoid sending too many requests on a dead connection,
- // we let the request continue to consume a concurrency slot
- // until we can confirm the server is still responding.
+ // if we haven't read any frames from the connection since
+ // sending this request, we let it continue to consume
+ // a concurrency slot until we can confirm the server is
+ // still responding.
// We do this by sending a PING frame along with the RST_STREAM
// (unless a ping is already in flight).
//
// because it's short-lived and will probably be closed before
// we get the ping response.
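+ // For example: if a request on stream 5 is canceled while
+ // readBeforeStreamID <= 5 (no frame has arrived since the request
+ // was sent), we send a PING along with the RST_STREAM, and the
+ // canceled request keeps holding its concurrency slot until the
+ // PING is acknowledged.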
ping := false
- if !closeOnIdle {
+ if !closeOnIdle && !readSinceStream {
cc.mu.Lock()
// rstStreamPingsBlocked works around a gRPC behavior:
// see comment on the field for details.
}
close(cs.donec)
+ cc.maybeCallStateHook()
}
// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
// See comment on ClientConn.rstStreamPingsBlocked for details.
rl.cc.rstStreamPingsBlocked = false
}
+ rl.cc.readBeforeStreamID = rl.cc.nextStreamID
cs := rl.cc.streams[id]
if cs != nil && !cs.readAborted {
return cs
func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
cc := rl.cc
+ defer cc.maybeCallStateHook()
cc.mu.Lock()
defer cc.mu.Unlock()
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() {
cc := rl.cc
+ defer cc.maybeCallStateHook()
cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
}
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
-// if there's already has a cached connection to the host.
+// if there's already a cached connection to the host.
// (The field is exported so it can be accessed via reflect from net/http; tested
// by TestNoDialH2RoundTripperType)
+//
+// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol,
+// and the http1.Transport can use type assertions to call non-RoundTrip methods on it.
+// This lets us expose, for example, NewClientConn to net/http.
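+//
+// A sketch of the assumed pattern on the net/http side (illustrative only;
+// the real call site lives in net/http and may differ):
+//
+//	if rt, ok := altProto.(interface {
+//		NewClientConn(net.Conn, func()) (http.RoundTripper, error)
+//	}); ok {
+//		cc, err := rt.NewClientConn(conn, stateHook)
+//		// use cc to send requests over conn
+//	}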
type noDialH2RoundTripper struct{ *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
return res, err
}
+func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) {
+ tr := rt.Transport
+ cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook)
+ if err != nil {
+ return nil, err
+ }
+
+ // RoundTrip should block when the conn is at its concurrency limit,
+ // not return an error. Setting strictMaxConcurrentStreams enables this.
+ cc.strictMaxConcurrentStreams = true
+
+ return netHTTPClientConn{cc}, nil
+}
+
+// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from
+// the RoundTripper returned by NewClientConn.
+type netHTTPClientConn struct {
+ cc *ClientConn
+}
+
+func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ return cc.cc.RoundTrip(req)
+}
+
+func (cc netHTTPClientConn) Close() error {
+ return cc.cc.Close()
+}
+
+func (cc netHTTPClientConn) Err() error {
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ if cc.cc.closed {
+ return errors.New("connection closed")
+ }
+ return nil
+}
+
+func (cc netHTTPClientConn) Reserve() error {
+ defer cc.cc.maybeCallStateHook()
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ if !cc.cc.canReserveLocked() {
+ return errors.New("connection is unavailable")
+ }
+ cc.cc.streamsReserved++
+ return nil
+}
+
+func (cc netHTTPClientConn) Release() {
+ defer cc.cc.maybeCallStateHook()
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ // We don't complain if streamsReserved is 0.
+ //
+ // This is consistent with RoundTrip: both Release and RoundTrip will
+ // consume a reservation iff one exists.
+ if cc.cc.streamsReserved > 0 {
+ cc.cc.streamsReserved--
+ }
+}
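+
+// A hypothetical caller on the net/http side might pair Reserve and
+// Release like this (illustrative only):
+//
+//	if err := conn.Reserve(); err != nil {
+//		// conn is unavailable; try another connection
+//	}
+//	// ... then either send a request (RoundTrip consumes the
+//	// reservation) or hand the unused slot back:
+//	conn.Release()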
+
+func (cc netHTTPClientConn) Available() int {
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ return cc.cc.availableLocked()
+}
+
+func (cc netHTTPClientConn) InFlight() int {
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ return cc.cc.currentRequestCountLocked()
+}
+
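+// maybeCallStateHook calls internalStateHook, if one is registered.
+// Callers defer it before deferring cc.mu.Unlock, so by LIFO defer
+// order the hook runs after cc.mu has been released.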
+func (cc *ClientConn) maybeCallStateHook() {
+ if cc.internalStateHook != nil {
+ cc.internalStateHook()
+ }
+}
+
func (t *Transport) idleConnTimeout() time.Duration {
// to keep things backwards compatible, we use non-zero values of
// IdleConnTimeout, followed by using the IdleConnTimeout on the underlying