mirror of https://github.com/grpc/grpc-go.git
Speed-up quota pools. (#1636)
* First commit.
* First commit.
* Second commit.
* Post-review update.
This commit is contained in:
parent
af224a8a48
commit
ac0ac2b80e
@@ -255,7 +255,7 @@ func (stats *Stats) maybeUpdate() {
     stats.dirty = false
 
     if stats.durations.Len() != 0 {
-        var percentToObserve = []int{50, 90}
+        var percentToObserve = []int{50, 90, 99}
         // First data record min unit from the latency result.
         stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: stats.unit})
         for _, position := range percentToObserve {
@@ -20,9 +20,9 @@ package transport
 
 import (
     "fmt"
+    "io"
     "math"
     "sync"
-    "sync/atomic"
     "time"
 
     "golang.org/x/net/http2"
@@ -49,7 +49,7 @@ const (
     // defaultLocalSendQuota sets is default value for number of data
     // bytes that each stream can schedule before some of it being
     // flushed out.
-    defaultLocalSendQuota = 64 * 1024
+    defaultLocalSendQuota = 128 * 1024
 )
 
 // The following defines various control items which could flow through
@@ -130,9 +130,8 @@ func (*ping) item() {}
 // quotaPool is a pool which accumulates the quota and sends it to acquire()
 // when it is available.
 type quotaPool struct {
-    c chan int
-
     mu      sync.Mutex
+    c       chan struct{}
     version uint32
     quota   int
 }
@@ -140,12 +139,8 @@ type quotaPool struct {
 // newQuotaPool creates a quotaPool which has quota q available to consume.
 func newQuotaPool(q int) *quotaPool {
     qb := &quotaPool{
-        c: make(chan int, 1),
-    }
-    if q > 0 {
-        qb.c <- q
-    } else {
-        qb.quota = q
+        quota: q,
+        c:     make(chan struct{}, 1),
     }
     return qb
 }
@@ -159,60 +154,83 @@ func (qb *quotaPool) add(v int) {
 }
 
 func (qb *quotaPool) lockedAdd(v int) {
-    select {
-    case n := <-qb.c:
-        qb.quota += n
-    default:
+    var wakeUp bool
+    if qb.quota <= 0 {
+        wakeUp = true // Wake up potential watiers.
     }
     qb.quota += v
-    if qb.quota <= 0 {
-        return
-    }
-    // After the pool has been created, this is the only place that sends on
-    // the channel. Since mu is held at this point and any quota that was sent
-    // on the channel has been retrieved, we know that this code will always
-    // place any positive quota value on the channel.
-    select {
-    case qb.c <- qb.quota:
-        qb.quota = 0
-    default:
+    if wakeUp && qb.quota > 0 {
+        select {
+        case qb.c <- struct{}{}:
+        default:
+        }
     }
 }
 
 func (qb *quotaPool) addAndUpdate(v int) {
     qb.mu.Lock()
-    defer qb.mu.Unlock()
     qb.lockedAdd(v)
-    // Update the version only after having added to the quota
-    // so that if acquireWithVesrion sees the new vesrion it is
-    // guaranteed to have seen the updated quota.
-    // Also, still keep this inside of the lock, so that when
-    // compareAndExecute is processing, this function doesn't
-    // get executed partially (quota gets updated but the version
-    // doesn't).
-    atomic.AddUint32(&(qb.version), 1)
+    qb.version++
+    qb.mu.Unlock()
 }
 
-func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
-    return qb.c, atomic.LoadUint32(&(qb.version))
+func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
+    qb.mu.Lock()
+    if qb.quota > 0 {
+        if v > qb.quota {
+            v = qb.quota
+        }
+        qb.quota -= v
+        ver := qb.version
+        qb.mu.Unlock()
+        return v, ver, nil
+    }
+    qb.mu.Unlock()
+    for {
+        select {
+        case <-wc.ctx.Done():
+            return 0, 0, ContextErr(wc.ctx.Err())
+        case <-wc.tctx.Done():
+            return 0, 0, ErrConnClosing
+        case <-wc.done:
+            return 0, 0, io.EOF
+        case <-wc.goAway:
+            return 0, 0, ErrStreamDrain
+        case <-qb.c:
+            qb.mu.Lock()
+            if qb.quota > 0 {
+                if v > qb.quota {
+                    v = qb.quota
+                }
+                qb.quota -= v
+                ver := qb.version
+                if qb.quota > 0 {
+                    select {
+                    case qb.c <- struct{}{}:
+                    default:
+                    }
+                }
+                qb.mu.Unlock()
+                return v, ver, nil
+
+            }
+            qb.mu.Unlock()
+        }
+    }
 }
 
 func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
     qb.mu.Lock()
-    defer qb.mu.Unlock()
-    if version == atomic.LoadUint32(&(qb.version)) {
+    if version == qb.version {
         success()
+        qb.mu.Unlock()
         return true
     }
     failure()
+    qb.mu.Unlock()
     return false
 }
 
-// acquire returns the channel on which available quota amounts are sent.
-func (qb *quotaPool) acquire() <-chan int {
-    return qb.c
-}
-
 // inFlow deals with inbound flow control
 type inFlow struct {
     mu sync.Mutex
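The control.go hunks above replace the channel-of-int quota pool with a plain integer balance guarded by a mutex, plus a 1-buffered chan struct{} that only signals "quota may be available". The following standalone sketch is illustrative only and is not the grpc-go implementation; the names pool, newPool, add and get are invented for this example. It shows the same pattern: add wakes at most one waiter when the balance goes from empty to positive, and get re-checks the balance under the lock after every wake-up.

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

// pool is a trimmed-down illustration of the reworked quotaPool: the balance
// lives behind mu, and c is only a wake-up signal, never the amount itself.
type pool struct {
    mu    sync.Mutex
    c     chan struct{} // 1-buffered; a pending signal means "re-check quota"
    quota int
}

func newPool(q int) *pool {
    return &pool{quota: q, c: make(chan struct{}, 1)}
}

// add returns quota to the pool and wakes at most one waiter, but only when
// the balance goes from non-positive to positive.
func (p *pool) add(v int) {
    p.mu.Lock()
    wakeUp := p.quota <= 0
    p.quota += v
    if wakeUp && p.quota > 0 {
        select {
        case p.c <- struct{}{}:
        default:
        }
    }
    p.mu.Unlock()
}

// get takes up to v quota. When the balance is empty it blocks on the wake-up
// channel (or ctx) and re-checks under the lock, like the slow path above.
func (p *pool) get(ctx context.Context, v int) (int, error) {
    for {
        p.mu.Lock()
        if p.quota > 0 {
            if v > p.quota {
                v = p.quota
            }
            p.quota -= v
            if p.quota > 0 { // Quota is left over; let the next waiter run too.
                select {
                case p.c <- struct{}{}:
                default:
                }
            }
            p.mu.Unlock()
            return v, nil
        }
        p.mu.Unlock()
        select {
        case <-ctx.Done():
            return 0, ctx.Err()
        case <-p.c:
        }
    }
}

func main() {
    p := newPool(0)
    go func() {
        time.Sleep(10 * time.Millisecond)
        p.add(100)
    }()
    n, err := p.get(context.Background(), 64)
    fmt.Println(n, err) // 64 <nil>
}

The real quotaPool additionally carries a version counter so that Write can roll back an optimistic reservation through compareAndExecute, as the hunk above shows.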
@@ -68,6 +68,9 @@ type http2Client struct {
     fc *inFlow
     // sendQuotaPool provides flow control to outbound message.
     sendQuotaPool *quotaPool
+    // localSendQuota limits the amount of data that can be scheduled
+    // for writing before it is actually written out.
+    localSendQuota *quotaPool
     // streamsQuota limits the max number of concurrent streams.
     streamsQuota *quotaPool
 
@@ -225,6 +228,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
         controlBuf: newControlBuffer(),
         fc: &inFlow{limit: uint32(icwz)},
         sendQuotaPool: newQuotaPool(defaultWindowSize),
+        localSendQuota: newQuotaPool(defaultLocalSendQuota),
         scheme: scheme,
         state: reachable,
         activeStreams: make(map[uint32]*Stream),
@@ -307,16 +311,15 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
     // TODO(zhaoq): Handle uint32 overflow of Stream.id.
     s := &Stream{
-        id:             t.nextID,
-        done:           make(chan struct{}),
-        goAway:         make(chan struct{}),
-        method:         callHdr.Method,
-        sendCompress:   callHdr.SendCompress,
-        buf:            newRecvBuffer(),
-        fc:             &inFlow{limit: uint32(t.initialWindowSize)},
-        sendQuotaPool:  newQuotaPool(int(t.streamSendQuota)),
-        localSendQuota: newQuotaPool(defaultLocalSendQuota),
-        headerChan:     make(chan struct{}),
+        id:            t.nextID,
+        done:          make(chan struct{}),
+        goAway:        make(chan struct{}),
+        method:        callHdr.Method,
+        sendCompress:  callHdr.SendCompress,
+        buf:           newRecvBuffer(),
+        fc:            &inFlow{limit: uint32(t.initialWindowSize)},
+        sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
+        headerChan:    make(chan struct{}),
     }
     t.nextID += 2
     s.requestRead = func(n int) {
@@ -336,7 +339,12 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
             t.updateWindow(s, uint32(n))
         },
     }
-
+    s.waiters = waiters{
+        ctx:    s.ctx,
+        tctx:   t.ctx,
+        done:   s.done,
+        goAway: s.goAway,
+    }
     return s
 }
 
@@ -409,14 +417,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
         return nil, ErrConnClosing
     }
     t.mu.Unlock()
-    sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire())
-    if err != nil {
+    // Get a quota of 1 from streamsQuota.
+    if _, _, err := t.streamsQuota.get(1, waiters{ctx: ctx, tctx: t.ctx}); err != nil {
         return nil, err
     }
-    // Returns the quota balance back.
-    if sq > 1 {
-        t.streamsQuota.add(sq - 1)
-    }
     // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
     // first and create a slice of that exact size.
     // Make the slice of certain predictable size to reduce allocations made by append.
@@ -662,9 +666,7 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
     var (
         streamQuota    int
         streamQuotaVer uint32
-        localSendQuota int
         err            error
-        sqChan         <-chan int
     )
     for idx, r := range [][]byte{hdr, data} {
         for len(r) > 0 {
@@ -673,36 +675,31 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
                 size = len(r)
             }
             if streamQuota == 0 { // Used up all the locally cached stream quota.
-                sqChan, streamQuotaVer = s.sendQuotaPool.acquireWithVersion()
-                // Wait until the stream has some quota to send the data.
-                streamQuota, err = wait(s.ctx, t.ctx, s.done, s.goAway, sqChan)
-                if err != nil {
-                    return err
-                }
-            }
-            if localSendQuota <= 0 { // Being a soft limit, it can go negative.
-                // Acquire local send quota to be able to write to the controlBuf.
-                localSendQuota, err = wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire())
+                // Get all the stream quota there is.
+                streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
                 if err != nil {
                     return err
                 }
             }
             if size > streamQuota {
                 size = streamQuota
-            } // No need to do that for localSendQuota since that's only a soft limit.
-            // Wait until the transport has some quota to send the data.
-            tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire())
+            }
+
+            // Get size worth quota from transport.
+            tq, _, err := t.sendQuotaPool.get(size, s.waiters)
             if err != nil {
                 return err
             }
             if tq < size {
                 size = tq
             }
-            if tq > size { // Overbooked transport quota. Return it back.
-                t.sendQuotaPool.add(tq - size)
+            ltq, _, err := t.localSendQuota.get(size, s.waiters)
+            if err != nil {
+                return err
             }
+            // even if ltq is smaller than size we don't adjust size since
+            // ltq is only a soft limit.
             streamQuota -= size
-            localSendQuota -= size
             p := r[:size]
             var endStream bool
             // See if this is the last frame to be written.
@@ -718,8 +715,8 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
                 }
             }
             success := func() {
-                sz := size
-                t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(sz) }})
+                ltq := ltq
+                t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { t.localSendQuota.add(ltq) }})
                 r = r[size:]
             }
             failure := func() { // The stream quota version must have changed.
@@ -729,7 +726,7 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
             if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
                 // Couldn't send this chunk out.
                 t.sendQuotaPool.add(size)
-                localSendQuota += size
+                t.localSendQuota.add(ltq)
                 streamQuota = 0
             }
         }
@@ -737,9 +734,6 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
     if streamQuota > 0 { // Add the left over quota back to stream.
         s.sendQuotaPool.add(streamQuota)
     }
-    if localSendQuota > 0 {
-        s.localSendQuota.add(localSendQuota)
-    }
     if !opts.Last {
         return nil
     }
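The rewritten client Write loop above sizes each chunk against two hard limits, the cached stream quota and the transport quota granted by get, while the local send quota stays a soft limit that may go negative without shrinking the chunk. A minimal sketch of that sizing arithmetic, with invented names and plain integers standing in for the quota pools:

package main

import "fmt"

// sizeChunk mirrors the chunk-sizing arithmetic of the rewritten Write loop
// using plain integers. Stream and transport quota are hard limits; the local
// send quota is a soft limit and never shrinks the chunk.
func sizeChunk(pending, maxFrameLen, streamQuota, grantedTransportQuota int) int {
    size := pending
    if size > maxFrameLen {
        size = maxFrameLen // at most one HTTP/2 data frame per chunk
    }
    if size > streamQuota {
        size = streamQuota // per-stream flow control
    }
    if size > grantedTransportQuota {
        size = grantedTransportQuota // connection-level flow control
    }
    return size
}

func main() {
    // 20 KiB pending, 16 KiB frames, 9 KiB stream quota, 12 KiB transport grant.
    fmt.Println(sizeChunk(20*1024, 16*1024, 9*1024, 12*1024)) // 9216
}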
@@ -70,7 +70,10 @@ type http2Server struct {
     fc *inFlow
     // sendQuotaPool provides flow control to outbound message.
     sendQuotaPool *quotaPool
-    stats         stats.Handler
+    // localSendQuota limits the amount of data that can be scheduled
+    // for writing before it is actually written out.
+    localSendQuota *quotaPool
+    stats          stats.Handler
     // Flag to keep track of reading activity on transport.
     // 1 is true and 0 is false.
     activity uint32 // Accessed atomically.
@@ -199,6 +202,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
         controlBuf: newControlBuffer(),
         fc: &inFlow{limit: uint32(icwz)},
         sendQuotaPool: newQuotaPool(defaultWindowSize),
+        localSendQuota: newQuotaPool(defaultLocalSendQuota),
         state: reachable,
         activeStreams: make(map[uint32]*Stream),
         streamSendQuota: defaultWindowSize,
@@ -316,7 +320,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
     }
     t.maxStreamID = streamID
     s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
-    s.localSendQuota = newQuotaPool(defaultLocalSendQuota)
     t.activeStreams[streamID] = s
     if len(t.activeStreams) == 1 {
         t.idle = time.Time{}
@@ -346,6 +349,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
             t.updateWindow(s, uint32(n))
         },
     }
+    s.waiters = waiters{
+        ctx:  s.ctx,
+        tctx: t.ctx,
+    }
     handle(s)
     return
 }
@@ -861,9 +868,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
     var (
         streamQuota    int
         streamQuotaVer uint32
-        localSendQuota int
         err            error
-        sqChan         <-chan int
     )
     for _, r := range [][]byte{hdr, data} {
         for len(r) > 0 {
@@ -872,43 +877,38 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
                 size = len(r)
             }
             if streamQuota == 0 { // Used up all the locally cached stream quota.
-                sqChan, streamQuotaVer = s.sendQuotaPool.acquireWithVersion()
-                // Wait until the stream has some quota to send the data.
-                streamQuota, err = wait(s.ctx, t.ctx, nil, nil, sqChan)
-                if err != nil {
-                    return err
-                }
-            }
-            if localSendQuota <= 0 {
-                localSendQuota, err = wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire())
+                // Get all the stream quota there is.
+                streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
                 if err != nil {
                     return err
                 }
             }
             if size > streamQuota {
                 size = streamQuota
-            } // No need to do that for localSendQuota since that's only a soft limit.
-            // Wait until the transport has some quota to send the data.
-            tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire())
+            }
+            // Get size worth quota from transport.
+            tq, _, err := t.sendQuotaPool.get(size, s.waiters)
             if err != nil {
                 return err
             }
             if tq < size {
                 size = tq
             }
-            if tq > size {
-                t.sendQuotaPool.add(tq - size)
+            ltq, _, err := t.localSendQuota.get(size, s.waiters)
+            if err != nil {
+                return err
             }
+            // even if ltq is smaller than size we don't adjust size since,
+            // ltq is only a soft limit.
             streamQuota -= size
-            localSendQuota -= size
             p := r[:size]
             // Reset ping strikes when sending data since this might cause
             // the peer to send ping.
             atomic.StoreUint32(&t.resetPingStrikes, 1)
             success := func() {
-                sz := size
+                ltq := ltq
                 t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
-                    s.localSendQuota.add(sz)
+                    t.localSendQuota.add(ltq)
                 }})
                 r = r[size:]
             }
@@ -919,7 +919,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
             if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
                 // Couldn't send this chunk out.
                 t.sendQuotaPool.add(size)
-                localSendQuota += size
+                t.localSendQuota.add(ltq)
                 streamQuota = 0
             }
         }
@@ -928,9 +928,6 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
         // ADd the left over quota back to stream.
         s.sendQuotaPool.add(streamQuota)
     }
-    if localSendQuota > 0 {
-        s.localSendQuota.add(localSendQuota)
-    }
     return nil
 }
 
@@ -229,6 +229,7 @@ type Stream struct {
     trReader  io.Reader
     fc        *inFlow
     recvQuota uint32
+    waiters   waiters
 
     // TODO: Remote this unused variable.
     // The accumulated inbound quota pending for window update.
@@ -238,8 +239,7 @@ type Stream struct {
     // is used to adjust flow control, if need be.
     requestRead func(int)
 
-    sendQuotaPool  *quotaPool
-    localSendQuota *quotaPool
+    sendQuotaPool *quotaPool
     // Close headerChan to indicate the end of reception of header metadata.
     headerChan chan struct{}
     // header caches the received header metadata.
@@ -703,25 +703,13 @@ func (e StreamError) Error() string {
     return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
 }
 
-// wait blocks until it can receive from one of the provided contexts or
-// channels. ctx is the context of the RPC, tctx is the context of the
-// transport, done is a channel closed to indicate the end of the RPC, goAway
-// is a channel closed to indicate a GOAWAY was received, and proceed is a
-// quota channel, whose received value is returned from this function if none
-// of the other signals occur first.
-func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) {
-    select {
-    case <-ctx.Done():
-        return 0, ContextErr(ctx.Err())
-    case <-done:
-        return 0, io.EOF
-    case <-goAway:
-        return 0, ErrStreamDrain
-    case <-tctx.Done():
-        return 0, ErrConnClosing
-    case i := <-proceed:
-        return i, nil
-    }
+// waiters are passed to quotaPool get methods to
+// wait on in addition to waiting on quota.
+type waiters struct {
+    ctx    context.Context
+    tctx   context.Context
+    done   chan struct{}
+    goAway chan struct{}
 }
 
 // GoAwayReason contains the reason for the GoAway frame received.
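With the wait helper gone, cancellation is handled inside quotaPool.get itself, which selects on the signals bundled in the new waiters struct. A self-contained sketch of that select follows; waitHints, block, and the error values are invented for the example and are not the transport package's API:

package main

import (
    "context"
    "errors"
    "fmt"
    "io"
)

// waitHints mirrors the shape of the waiters bundle: the RPC context, the
// transport context, and the stream's done and goAway channels.
type waitHints struct {
    ctx    context.Context
    tctx   context.Context
    done   chan struct{}
    goAway chan struct{}
}

var (
    errConnClosing = errors.New("transport is closing")
    errStreamDrain = errors.New("stream rejected by draining transport")
)

// block waits either for quota to be signalled on c or for one of the
// cancellation signals to fire, roughly like the slow path of get.
func block(wc waitHints, c <-chan struct{}) error {
    select {
    case <-wc.ctx.Done():
        return wc.ctx.Err()
    case <-wc.tctx.Done():
        return errConnClosing
    case <-wc.done:
        return io.EOF
    case <-wc.goAway:
        return errStreamDrain
    case <-c:
        return nil
    }
}

func main() {
    c := make(chan struct{}, 1)
    c <- struct{}{} // pretend add() just signalled available quota
    wc := waitHints{
        ctx:    context.Background(),
        tctx:   context.Background(),
        done:   make(chan struct{}),
        goAway: make(chan struct{}),
    }
    fmt.Println(block(wc, c)) // <nil>
}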
@@ -1093,44 +1093,29 @@ func TestMaxStreams(t *testing.T) {
             }
         }
     }()
-    var failureReason string
     // Test these conditions untill they pass or
     // we reach the deadline (failure case).
     for {
         select {
         case <-ch:
         case <-done:
-            t.Fatalf(failureReason)
+            t.Fatalf("streamsQuota.quota shouldn't be non-zero.")
         }
-        select {
-        case q := <-cc.streamsQuota.acquire():
-            failureReason = "streamsQuota.acquire() becomes readable mistakenly."
-            cc.streamsQuota.add(q)
-        default:
-            cc.streamsQuota.mu.Lock()
-            quota := cc.streamsQuota.quota
-            cc.streamsQuota.mu.Unlock()
-            if quota != 0 {
-                failureReason = "streamsQuota.quota got non-zero quota mistakenly."
-            } else {
-                failureReason = ""
-            }
-        }
-        if failureReason == "" {
+        cc.streamsQuota.mu.Lock()
+        sq := cc.streamsQuota.quota
+        cc.streamsQuota.mu.Unlock()
+        if sq == 0 {
             break
         }
     }
     close(ready)
     // Close the pending stream so that the streams quota becomes available for the next new stream.
     ct.CloseStream(s, nil)
-    select {
-    case i := <-cc.streamsQuota.acquire():
-        if i != 1 {
-            t.Fatalf("streamsQuota.acquire() got %d quota, want 1.", i)
-        }
-        cc.streamsQuota.add(i)
-    default:
-        t.Fatalf("streamsQuota.acquire() is not readable.")
+    cc.streamsQuota.mu.Lock()
+    i := cc.streamsQuota.quota
+    cc.streamsQuota.mu.Unlock()
+    if i != 1 {
+        t.Fatalf("streamsQuota is %d, want 1.", i)
     }
     if _, err := ct.NewStream(context.Background(), callHdr); err != nil {
         t.Fatalf("Failed to open stream: %v", err)
@@ -1685,7 +1670,12 @@ func testAccountCheckWindowSize(t *testing.T, wc windowSizeConfig) {
     })
 
     ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-    serverSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire())
+    serverSendQuota, _, err := st.sendQuotaPool.get(math.MaxInt32, waiters{
+        ctx:    ctx,
+        tctx:   st.ctx,
+        done:   nil,
+        goAway: nil,
+    })
     if err != nil {
         t.Fatalf("Error while acquiring sendQuota on server. Err: %v", err)
     }
@@ -1707,7 +1697,12 @@ func testAccountCheckWindowSize(t *testing.T, wc windowSizeConfig) {
         t.Fatalf("Client transport flow control window size is %v, want %v", limit, connectOptions.InitialConnWindowSize)
     }
     ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-    clientSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire())
+    clientSendQuota, _, err := ct.sendQuotaPool.get(math.MaxInt32, waiters{
+        ctx:    ctx,
+        tctx:   ct.ctx,
+        done:   nil,
+        goAway: nil,
+    })
     if err != nil {
         t.Fatalf("Error while acquiring sendQuota on client. Err: %v", err)
     }
@@ -1849,7 +1844,12 @@ func TestAccountCheckExpandingWindow(t *testing.T) {
 
         // Check flow conrtrol window on client stream is equal to out flow on server stream.
         ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-        serverStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, sstream.sendQuotaPool.acquire())
+        serverStreamSendQuota, _, err := sstream.sendQuotaPool.get(math.MaxInt32, waiters{
+            ctx:    ctx,
+            tctx:   context.Background(),
+            done:   nil,
+            goAway: nil,
+        })
         cancel()
         if err != nil {
             return true, fmt.Errorf("error while acquiring server stream send quota. Err: %v", err)
@@ -1864,7 +1864,12 @@ func TestAccountCheckExpandingWindow(t *testing.T) {
 
         // Check flow control window on server stream is equal to out flow on client stream.
         ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-        clientStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, cstream.sendQuotaPool.acquire())
+        clientStreamSendQuota, _, err := cstream.sendQuotaPool.get(math.MaxInt32, waiters{
+            ctx:    ctx,
+            tctx:   context.Background(),
+            done:   nil,
+            goAway: nil,
+        })
         cancel()
         if err != nil {
             return true, fmt.Errorf("error while acquiring client stream send quota. Err: %v", err)
@@ -1879,7 +1884,12 @@ func TestAccountCheckExpandingWindow(t *testing.T) {
 
         // Check flow control window on client transport is equal to out flow of server transport.
         ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-        serverTrSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire())
+        serverTrSendQuota, _, err := st.sendQuotaPool.get(math.MaxInt32, waiters{
+            ctx:    ctx,
+            tctx:   st.ctx,
+            done:   nil,
+            goAway: nil,
+        })
         cancel()
         if err != nil {
             return true, fmt.Errorf("error while acquring server transport send quota. Err: %v", err)
@@ -1894,7 +1904,12 @@ func TestAccountCheckExpandingWindow(t *testing.T) {
 
         // Check flow control window on server transport is equal to out flow of client transport.
         ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-        clientTrSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire())
+        clientTrSendQuota, _, err := ct.sendQuotaPool.get(math.MaxInt32, waiters{
+            ctx:    ctx,
+            tctx:   ct.ctx,
+            done:   nil,
+            goAway: nil,
+        })
         cancel()
         if err != nil {
             return true, fmt.Errorf("error while acquiring client transport send quota. Err: %v", err)