sync: resync vendor folder
parent a3cf88689b
commit 2308857ad3
File diff suppressed because it is too large

@@ -32,7 +32,9 @@ var _ = math.Inf

 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

 type Permission_Type int32

@@ -99,113 +101,113 @@ func init() {
 	proto.RegisterType((*Role)(nil), "authpb.Role")
 	proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
 }
-func (m *User) Marshal() (data []byte, err error) {
+func (m *User) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }

-func (m *User) MarshalTo(data []byte) (int, error) {
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.Name) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Name)))
-		i += copy(data[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
 	}
 	if len(m.Password) > 0 {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Password)))
-		i += copy(data[i:], m.Password)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+		i += copy(dAtA[i:], m.Password)
 	}
 	if len(m.Roles) > 0 {
 		for _, s := range m.Roles {
-			data[i] = 0x1a
+			dAtA[i] = 0x1a
 			i++
 			l = len(s)
 			for l >= 1<<7 {
-				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
 				l >>= 7
 				i++
 			}
-			data[i] = uint8(l)
+			dAtA[i] = uint8(l)
 			i++
-			i += copy(data[i:], s)
+			i += copy(dAtA[i:], s)
 		}
 	}
 	return i, nil
 }

-func (m *Permission) Marshal() (data []byte, err error) {
+func (m *Permission) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }

-func (m *Permission) MarshalTo(data []byte) (int, error) {
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.PermType != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintAuth(data, i, uint64(m.PermType))
+		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
 	}
 	if len(m.Key) > 0 {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Key)))
-		i += copy(data[i:], m.Key)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
 	}
 	if len(m.RangeEnd) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
-		i += copy(data[i:], m.RangeEnd)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+		i += copy(dAtA[i:], m.RangeEnd)
 	}
 	return i, nil
 }

-func (m *Role) Marshal() (data []byte, err error) {
+func (m *Role) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }

-func (m *Role) MarshalTo(data []byte) (int, error) {
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.Name) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
-		i = encodeVarintAuth(data, i, uint64(len(m.Name)))
-		i += copy(data[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
 	}
 	if len(m.KeyPermission) > 0 {
 		for _, msg := range m.KeyPermission {
-			data[i] = 0x12
+			dAtA[i] = 0x12
 			i++
-			i = encodeVarintAuth(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 				return 0, err
 			}

@@ -215,31 +217,31 @@ func (m *Role) MarshalTo(data []byte) (int, error) {
 	return i, nil
 }

-func encodeFixed64Auth(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 }
-func encodeFixed32Auth(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 }
-func encodeVarintAuth(data []byte, offset int, v uint64) int {
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 }
 func (m *User) Size() (n int) {

@@ -308,8 +310,8 @@ func sovAuth(x uint64) (n int) {
 func sozAuth(x uint64) (n int) {
 	return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
-func (m *User) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *User) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -321,7 +323,7 @@ func (m *User) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -349,7 +351,7 @@ func (m *User) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -363,7 +365,7 @@ func (m *User) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
 			if m.Name == nil {
 				m.Name = []byte{}
 			}
@@ -380,7 +382,7 @@ func (m *User) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -394,7 +396,7 @@ func (m *User) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Password = append(m.Password[:0], data[iNdEx:postIndex]...)
+			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
 			if m.Password == nil {
 				m.Password = []byte{}
 			}
@@ -411,7 +413,7 @@ func (m *User) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -426,11 +428,11 @@ func (m *User) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -449,8 +451,8 @@ func (m *User) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func (m *Permission) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Permission) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -462,7 +464,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -490,7 +492,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.PermType |= (Permission_Type(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -509,7 +511,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -523,7 +525,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
 			if m.Key == nil {
 				m.Key = []byte{}
 			}
@@ -540,7 +542,7 @@ func (m *Permission) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -554,14 +556,14 @@ func (m *Permission) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
 			if m.RangeEnd == nil {
 				m.RangeEnd = []byte{}
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -580,8 +582,8 @@ func (m *Permission) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func (m *Role) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Role) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -593,7 +595,7 @@ func (m *Role) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -621,7 +623,7 @@ func (m *Role) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -635,7 +637,7 @@ func (m *Role) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
 			if m.Name == nil {
 				m.Name = []byte{}
 			}
@@ -652,7 +654,7 @@ func (m *Role) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -667,13 +669,13 @@ func (m *Role) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 			}
 			m.KeyPermission = append(m.KeyPermission, &Permission{})
-			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipAuth(data[iNdEx:])
+			skippy, err := skipAuth(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -692,8 +694,8 @@ func (m *Role) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func skipAuth(data []byte) (n int, err error) {
-	l := len(data)
+func skipAuth(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		var wire uint64
@@ -704,7 +706,7 @@ func skipAuth(data []byte) (n int, err error) {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -722,7 +724,7 @@ func skipAuth(data []byte) (n int, err error) {
 					return 0, io.ErrUnexpectedEOF
 				}
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 				}
 			}
@@ -739,7 +741,7 @@ func skipAuth(data []byte) (n int, err error) {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -762,7 +764,7 @@ func skipAuth(data []byte) (n int, err error) {
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
@@ -773,7 +775,7 @@ func skipAuth(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 					break
 				}
-				next, err := skipAuth(data[start:])
+				next, err := skipAuth(dAtA[start:])
 				if err != nil {
 					return 0, err
 				}

@@ -797,6 +799,8 @@ var (
 	ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
 )

+func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
+
 var fileDescriptorAuth = []byte{
 	// 288 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,

@@ -49,46 +49,37 @@ func isRangeEqual(a, b *rangePerm) bool {

 // removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
 // If there are equal ranges, removeSubsetRangePerms only keeps one of them.
-func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
-	// TODO(mitake): currently it is O(n^2), we need a better algorithm
-	newp := make([]*rangePerm, 0)
-
+// It returns a sorted rangePerm slice.
+func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) {
+	sort.Sort(RangePermSliceByBegin(perms))
+	var prev *rangePerm
 	for i := range perms {
-		skip := false
-
-		for j := range perms {
-			if i == j {
-				continue
-			}
-
-			if isRangeEqual(perms[i], perms[j]) {
-				// if ranges are equal, we only keep the first range.
-				if i > j {
-					skip = true
-					break
-				}
-			} else if isSubset(perms[i], perms[j]) {
-				// if a range is a strict subset of the other one, we skip the subset.
-				skip = true
-				break
-			}
-		}
-
-		if skip {
+		if i == 0 {
+			prev = perms[i]
+			newp = append(newp, perms[i])
 			continue
 		}
-
+		if isRangeEqual(perms[i], prev) {
+			continue
+		}
+		if isSubset(perms[i], prev) {
+			continue
+		}
+		if isSubset(prev, perms[i]) {
+			prev = perms[i]
+			newp[len(newp)-1] = perms[i]
+			continue
+		}
+		prev = perms[i]
 		newp = append(newp, perms[i])
 	}
-
 	return newp
 }

 // mergeRangePerms merges adjacent rangePerms.
 func mergeRangePerms(perms []*rangePerm) []*rangePerm {
-	merged := make([]*rangePerm, 0)
+	var merged []*rangePerm
 	perms = removeSubsetRangePerms(perms)
-	sort.Sort(RangePermSliceByBegin(perms))

 	i := 0
 	for i < len(perms) {

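Note: the rewritten removeSubsetRangePerms above trades the old O(n^2) pairwise scan for sort-then-sweep: once ranges are sorted by begin key, each range only has to be compared against the most recent survivor. A minimal self-contained sketch of the same idea (the perm type and string intervals are stand-ins for etcd's rangePerm, not the real definitions):

```go
package main

import (
	"fmt"
	"sort"
)

// perm stands in for etcd's rangePerm: a key interval [begin, end).
type perm struct{ begin, end string }

// subset reports whether a lies entirely inside b.
func subset(a, b perm) bool { return b.begin <= a.begin && a.end <= b.end }

// dedup mirrors the sweep in removeSubsetRangePerms: sort by begin,
// then compare each range only against the previous survivor.
func dedup(perms []perm) (newp []perm) {
	sort.Slice(perms, func(i, j int) bool { return perms[i].begin < perms[j].begin })
	var prev perm
	for i, p := range perms {
		if i == 0 {
			prev = p
			newp = append(newp, p)
			continue
		}
		switch {
		case p == prev || subset(p, prev):
			// equal to, or contained in, the survivor: drop p
		case subset(prev, p):
			// survivor is contained in p: replace it in place
			prev = p
			newp[len(newp)-1] = p
		default:
			prev = p
			newp = append(newp, p)
		}
	}
	return newp
}

func main() {
	in := []perm{{"a", "z"}, {"b", "c"}, {"a", "z"}, {"x", "zz"}}
	fmt.Println(dedup(in)) // [{a z} {x zz}]
}
```
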
@@ -20,6 +20,9 @@ package auth
 import (
 	"crypto/rand"
 	"math/big"
+	"strings"
+	"sync"
+	"time"
 )

 const (

@@ -27,6 +30,73 @@ const (
 	defaultSimpleTokenLength = 16
 )

+// var for testing purposes
+var (
+	simpleTokenTTL           = 5 * time.Minute
+	simpleTokenTTLResolution = 1 * time.Second
+)
+
+type simpleTokenTTLKeeper struct {
+	tokensMu        sync.Mutex
+	tokens          map[string]time.Time
+	stopCh          chan chan struct{}
+	deleteTokenFunc func(string)
+}
+
+func NewSimpleTokenTTLKeeper(deletefunc func(string)) *simpleTokenTTLKeeper {
+	stk := &simpleTokenTTLKeeper{
+		tokens:          make(map[string]time.Time),
+		stopCh:          make(chan chan struct{}),
+		deleteTokenFunc: deletefunc,
+	}
+	go stk.run()
+	return stk
+}
+
+func (tm *simpleTokenTTLKeeper) stop() {
+	waitCh := make(chan struct{})
+	tm.stopCh <- waitCh
+	<-waitCh
+	close(tm.stopCh)
+}
+
+func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
+	tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+}
+
+func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
+	if _, ok := tm.tokens[token]; ok {
+		tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+	}
+}
+
+func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
+	delete(tm.tokens, token)
+}
+
+func (tm *simpleTokenTTLKeeper) run() {
+	tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+	defer tokenTicker.Stop()
+	for {
+		select {
+		case <-tokenTicker.C:
+			nowtime := time.Now()
+			tm.tokensMu.Lock()
+			for t, tokenendtime := range tm.tokens {
+				if nowtime.After(tokenendtime) {
+					tm.deleteTokenFunc(t)
+					delete(tm.tokens, t)
+				}
+			}
+			tm.tokensMu.Unlock()
+		case waitCh := <-tm.stopCh:
+			tm.tokens = make(map[string]time.Time)
+			waitCh <- struct{}{}
+			return
+		}
+	}
+}
+
 func (as *authStore) GenSimpleToken() (string, error) {
 	ret := make([]byte, defaultSimpleTokenLength)

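Note: the simpleTokenTTLKeeper added above expires tokens with one coarse ticker (simpleTokenTTLResolution) instead of a timer per token; addSimpleToken and resetSimpleToken just stamp a deadline and the run goroutine sweeps the map. A hedged usage sketch, valid only inside this auth package (the shortened TTLs and the print-only deleter are assumptions for demonstration):

```go
func exampleKeeper() {
	simpleTokenTTL = 50 * time.Millisecond           // real value: 5 * time.Minute
	simpleTokenTTLResolution = 10 * time.Millisecond // real value: 1 * time.Second

	keeper := NewSimpleTokenTTLKeeper(func(token string) {
		// authStore's real deleter removes simpleTokens[token] here
		fmt.Println("expired:", token)
	})
	defer keeper.stop()

	// callers hold tokensMu around map mutations, as authStore does
	keeper.tokensMu.Lock()
	keeper.addSimpleToken("abc.42")
	keeper.tokensMu.Unlock()

	time.Sleep(100 * time.Millisecond) // let the sweep fire and expire the token
}
```
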
@@ -43,6 +113,7 @@ func (as *authStore) GenSimpleToken() (string, error) {
 }

 func (as *authStore) assignSimpleTokenToUser(username, token string) {
+	as.simpleTokenKeeper.tokensMu.Lock()
 	as.simpleTokensMu.Lock()

 	_, ok := as.simpleTokens[token]

@@ -51,5 +122,23 @@ func (as *authStore) assignSimpleTokenToUser(username, token string) {
 	}

 	as.simpleTokens[token] = username
+	as.simpleTokenKeeper.addSimpleToken(token)
 	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
+}
+
+func (as *authStore) invalidateUser(username string) {
+	if as.simpleTokenKeeper == nil {
+		return
+	}
+	as.simpleTokenKeeper.tokensMu.Lock()
+	as.simpleTokensMu.Lock()
+	for token, name := range as.simpleTokens {
+		if strings.Compare(name, username) == 0 {
+			delete(as.simpleTokens, token)
+			as.simpleTokenKeeper.deleteSimpleToken(token)
+		}
+	}
+	as.simpleTokensMu.Unlock()
+	as.simpleTokenKeeper.tokensMu.Unlock()
 }

@@ -16,9 +16,11 @@ package auth

 import (
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"

@@ -28,6 +30,7 @@ import (
 	"github.com/coreos/pkg/capnslog"
 	"golang.org/x/crypto/bcrypt"
 	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
 )

 var (

@@ -35,6 +38,8 @@ var (
 	authEnabled  = []byte{1}
 	authDisabled = []byte{0}

+	revisionKey = []byte("authRevision")
+
 	authBucketName      = []byte("auth")
 	authUsersBucketName = []byte("authUsers")
 	authRolesBucketName = []byte("authRoles")

@@ -44,6 +49,7 @@ var (
 	ErrRootUserNotExist = errors.New("auth: root user does not exist")
 	ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
 	ErrUserAlreadyExist = errors.New("auth: user already exists")
+	ErrUserEmpty        = errors.New("auth: user name is empty")
 	ErrUserNotFound     = errors.New("auth: user not found")
 	ErrRoleAlreadyExist = errors.New("auth: role already exists")
 	ErrRoleNotFound     = errors.New("auth: role not found")

@@ -51,13 +57,26 @@ var (
 	ErrPermissionDenied     = errors.New("auth: permission denied")
 	ErrRoleNotGranted       = errors.New("auth: role is not granted to the user")
 	ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+	ErrAuthNotEnabled       = errors.New("auth: authentication is not enabled")
+	ErrAuthOldRevision      = errors.New("auth: revision in header is old")
+	ErrInvalidAuthToken     = errors.New("auth: invalid auth token")
+
+	// BcryptCost is the algorithm cost / strength for hashing auth passwords
+	BcryptCost = bcrypt.DefaultCost
 )

 const (
 	rootUser = "root"
 	rootRole = "root"
+
+	revBytesLen = 8
 )

+type AuthInfo struct {
+	Username string
+	Revision uint64
+}
+
 type AuthStore interface {
 	// AuthEnable turns on the authentication feature
 	AuthEnable() error

@@ -110,23 +129,36 @@ type AuthStore interface {
 	// RoleList gets a list of all roles
 	RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)

-	// UsernameFromToken gets a username from the given Token
-	UsernameFromToken(token string) (string, bool)
+	// AuthInfoFromToken gets a username from the given Token and current revision number
+	// (The revision number is used for preventing the TOCTOU problem)
+	AuthInfoFromToken(token string) (*AuthInfo, bool)

 	// IsPutPermitted checks put permission of the user
-	IsPutPermitted(username string, key []byte) bool
+	IsPutPermitted(authInfo *AuthInfo, key []byte) error

 	// IsRangePermitted checks range permission of the user
-	IsRangePermitted(username string, key, rangeEnd []byte) bool
+	IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error

 	// IsDeleteRangePermitted checks delete-range permission of the user
-	IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool
+	IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error

 	// IsAdminPermitted checks admin permission of the user
-	IsAdminPermitted(username string) bool
+	IsAdminPermitted(authInfo *AuthInfo) error

 	// GenSimpleToken produces a simple random string
 	GenSimpleToken() (string, error)

+	// Revision gets current revision of authStore
+	Revision() uint64
+
+	// CheckPassword checks a given pair of username and password is correct
+	CheckPassword(username, password string) (uint64, error)
+
+	// Close does cleanup of AuthStore
+	Close() error
+
+	// AuthInfoFromCtx gets AuthInfo from gRPC's context
+	AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
 }

 type authStore struct {

@@ -136,11 +168,33 @@ type authStore struct {

 	rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions

-	simpleTokensMu sync.RWMutex
-	simpleTokens   map[string]string // token -> username
+	revision uint64
+
+	// tokenSimple in v3.2+
+	indexWaiter       func(uint64) <-chan struct{}
+	simpleTokenKeeper *simpleTokenTTLKeeper
+	simpleTokensMu    sync.Mutex
+	simpleTokens      map[string]string // token -> username
+}
+
+func newDeleterFunc(as *authStore) func(string) {
+	return func(t string) {
+		as.simpleTokensMu.Lock()
+		defer as.simpleTokensMu.Unlock()
+		if username, ok := as.simpleTokens[t]; ok {
+			plog.Infof("deleting token %s for user %s", t, username)
+			delete(as.simpleTokens, t)
+		}
+	}
 }

 func (as *authStore) AuthEnable() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if as.enabled {
+		plog.Noticef("Authentication already enabled")
+		return nil
+	}
 	b := as.be
 	tx := b.BatchTx()
 	tx.Lock()

@@ -160,33 +214,64 @@ func (as *authStore) AuthEnable() error {

 	tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)

-	as.enabledMu.Lock()
 	as.enabled = true
-	as.enabledMu.Unlock()
+	as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))

 	as.rangePermCache = make(map[string]*unifiedRangePermissions)

+	as.revision = getRevision(tx)
+
 	plog.Noticef("Authentication enabled")

 	return nil
 }

 func (as *authStore) AuthDisable() {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return
+	}
 	b := as.be
 	tx := b.BatchTx()
 	tx.Lock()
 	tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
+	as.commitRevision(tx)
 	tx.Unlock()
 	b.ForceCommit()

-	as.enabledMu.Lock()
 	as.enabled = false
-	as.enabledMu.Unlock()
+
+	as.simpleTokensMu.Lock()
+	as.simpleTokens = make(map[string]string) // invalidate all tokens
+	as.simpleTokensMu.Unlock()
+	if as.simpleTokenKeeper != nil {
+		as.simpleTokenKeeper.stop()
+		as.simpleTokenKeeper = nil
+	}

 	plog.Noticef("Authentication disabled")
 }

+func (as *authStore) Close() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return nil
+	}
+	if as.simpleTokenKeeper != nil {
+		as.simpleTokenKeeper.stop()
+		as.simpleTokenKeeper = nil
+	}
+	return nil
+}
+
 func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
+	if !as.isAuthEnabled() {
+		return nil, ErrAuthNotEnabled
+	}
+
 	// TODO(mitake): after adding jwt support, branching based on values of ctx is required
 	index := ctx.Value("index").(uint64)
 	simpleToken := ctx.Value("simpleToken").(string)

@@ -200,11 +285,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
 		return nil, ErrAuthFailed
 	}

-	if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
-		plog.Noticef("authentication failed, invalid password for user %s", username)
-		return &pb.AuthenticateResponse{}, ErrAuthFailed
-	}
-
 	token := fmt.Sprintf("%s.%d", simpleToken, index)
 	as.assignSimpleTokenToUser(username, token)

@@ -212,6 +292,24 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
 	return &pb.AuthenticateResponse{Token: token}, nil
 }

+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	user := getUser(tx, username)
+	if user == nil {
+		return 0, ErrAuthFailed
+	}
+
+	if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
+		plog.Noticef("authentication failed, invalid password for user %s", username)
+		return 0, ErrAuthFailed
+	}
+
+	return getRevision(tx), nil
+}
+
 func (as *authStore) Recover(be backend.Backend) {
 	enabled := false
 	as.be = be

@@ -223,6 +321,9 @@ func (as *authStore) Recover(be backend.Backend) {
 			enabled = true
 		}
 	}

+	as.revision = getRevision(tx)
+
 	tx.Unlock()

 	as.enabledMu.Lock()

@@ -231,7 +332,11 @@ func (as *authStore) Recover(be backend.Backend) {
 }

 func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
-	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
+	if len(r.Name) == 0 {
+		return nil, ErrUserEmpty
+	}
+
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
 	if err != nil {
 		plog.Errorf("failed to hash password: %s", err)
 		return nil, err

@@ -253,6 +358,8 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {

 	putUser(tx, newUser)

+	as.commitRevision(tx)
+
 	plog.Noticef("added a new user: %s", r.Name)

 	return &pb.AuthUserAddResponse{}, nil

@@ -270,6 +377,11 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {

 	delUser(tx, r.Name)

+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.invalidateUser(r.Name)
+
 	plog.Noticef("deleted a user: %s", r.Name)

 	return &pb.AuthUserDeleteResponse{}, nil

@@ -278,7 +390,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
 func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
 	// TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
 	// If the cost is too high, we should move the encryption to outside of the raft
-	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
+	hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
 	if err != nil {
 		plog.Errorf("failed to hash password: %s", err)
 		return nil, err

@@ -301,6 +413,11 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {

 	putUser(tx, updatedUser)

+	as.commitRevision(tx)
+
+	as.invalidateCachedPerm(r.Name)
+	as.invalidateUser(r.Name)
+
 	plog.Noticef("changed a password of a user: %s", r.Name)

 	return &pb.AuthUserChangePasswordResponse{}, nil

@@ -336,6 +453,8 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {

 	as.invalidateCachedPerm(r.User)

+	as.commitRevision(tx)
+
 	plog.Noticef("granted role %s to user %s", r.Role, r.User)
 	return &pb.AuthUserGrantRoleResponse{}, nil
 }

@@ -351,11 +470,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
 	if user == nil {
 		return nil, ErrUserNotFound
 	}
-
-	for _, role := range user.Roles {
-		resp.Roles = append(resp.Roles, role)
-	}
-
+	resp.Roles = append(resp.Roles, user.Roles...)
 	return &resp, nil
 }

|
||||||
|
|
||||||
as.invalidateCachedPerm(r.Name)
|
as.invalidateCachedPerm(r.Name)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
|
plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
|
||||||
return &pb.AuthUserRevokeRoleResponse{}, nil
|
return &pb.AuthUserRevokeRoleResponse{}, nil
|
||||||
}
|
}
|
||||||
|
|
@ -419,11 +536,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
|
||||||
if role == nil {
|
if role == nil {
|
||||||
return nil, ErrRoleNotFound
|
return nil, ErrRoleNotFound
|
||||||
}
|
}
|
||||||
|
resp.Perm = append(resp.Perm, role.KeyPermission...)
|
||||||
for _, perm := range role.KeyPermission {
|
|
||||||
resp.Perm = append(resp.Perm, perm)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &resp, nil
|
return &resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -473,6 +586,8 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
 	// It should be optimized.
 	as.clearCachedPerm()

+	as.commitRevision(tx)
+
 	plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
 	return &pb.AuthRoleRevokePermissionResponse{}, nil
 }

|
|
@ -501,6 +616,8 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
|
||||||
|
|
||||||
delRole(tx, r.Role)
|
delRole(tx, r.Role)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("deleted role %s", r.Role)
|
plog.Noticef("deleted role %s", r.Role)
|
||||||
return &pb.AuthRoleDeleteResponse{}, nil
|
return &pb.AuthRoleDeleteResponse{}, nil
|
||||||
}
|
}
|
||||||
|
|
@ -521,16 +638,24 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
|
||||||
|
|
||||||
putRole(tx, newRole)
|
putRole(tx, newRole)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("Role %s is created", r.Name)
|
plog.Noticef("Role %s is created", r.Name)
|
||||||
|
|
||||||
return &pb.AuthRoleAddResponse{}, nil
|
return &pb.AuthRoleAddResponse{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) UsernameFromToken(token string) (string, bool) {
|
func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
|
||||||
as.simpleTokensMu.RLock()
|
// same as '(t *tokenSimple) info' in v3.2+
|
||||||
defer as.simpleTokensMu.RUnlock()
|
as.simpleTokenKeeper.tokensMu.Lock()
|
||||||
t, ok := as.simpleTokens[token]
|
as.simpleTokensMu.Lock()
|
||||||
return t, ok
|
username, ok := as.simpleTokens[token]
|
||||||
|
if ok {
|
||||||
|
as.simpleTokenKeeper.resetSimpleToken(token)
|
||||||
|
}
|
||||||
|
as.simpleTokensMu.Unlock()
|
||||||
|
as.simpleTokenKeeper.tokensMu.Unlock()
|
||||||
|
return &AuthInfo{Username: username, Revision: as.revision}, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
type permSlice []*authpb.Permission
|
type permSlice []*authpb.Permission
|
||||||
|
|
@@ -582,15 +707,26 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
 	// It should be optimized.
 	as.clearCachedPerm()

+	as.commitRevision(tx)
+
 	plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])

 	return &pb.AuthRoleGrantPermissionResponse{}, nil
 }

-func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
+func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
 	// TODO(mitake): this function would be costly so we need a caching mechanism
 	if !as.isAuthEnabled() {
-		return true
+		return nil
+	}
+
+	// only gets rev == 0 when passed AuthInfo{}; no user given
+	if revision == 0 {
+		return ErrUserEmpty
+	}
+
+	if revision < as.revision {
+		return ErrAuthOldRevision
 	}

 	tx := as.be.BatchTx()

|
||||||
user := getUser(tx, userName)
|
user := getUser(tx, userName)
|
||||||
if user == nil {
|
if user == nil {
|
||||||
plog.Errorf("invalid user name %s for permission checking", userName)
|
plog.Errorf("invalid user name %s for permission checking", userName)
|
||||||
return false
|
return ErrPermissionDenied
|
||||||
}
|
}
|
||||||
|
|
||||||
// root role should have permission on all ranges
|
// root role should have permission on all ranges
|
||||||
if hasRootRole(user) {
|
if hasRootRole(user) {
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
|
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return false
|
return ErrPermissionDenied
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsPutPermitted(username string, key []byte) bool {
|
func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
|
||||||
return as.isOpPermitted(username, key, nil, authpb.WRITE)
|
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsRangePermitted(username string, key, rangeEnd []byte) bool {
|
func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
|
||||||
return as.isOpPermitted(username, key, rangeEnd, authpb.READ)
|
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool {
|
func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
|
||||||
return as.isOpPermitted(username, key, rangeEnd, authpb.WRITE)
|
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsAdminPermitted(username string) bool {
|
func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
|
||||||
if !as.isAuthEnabled() {
|
if !as.isAuthEnabled() {
|
||||||
return true
|
return nil
|
||||||
|
}
|
||||||
|
if authInfo == nil {
|
||||||
|
return ErrUserEmpty
|
||||||
}
|
}
|
||||||
|
|
||||||
tx := as.be.BatchTx()
|
tx := as.be.BatchTx()
|
||||||
tx.Lock()
|
tx.Lock()
|
||||||
defer tx.Unlock()
|
defer tx.Unlock()
|
||||||
|
|
||||||
u := getUser(tx, username)
|
u := getUser(tx, authInfo.Username)
|
||||||
if u == nil {
|
if u == nil {
|
||||||
return false
|
return ErrUserNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return hasRootRole(u)
|
if !hasRootRole(u) {
|
||||||
|
return ErrPermissionDenied
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getUser(tx backend.BatchTx, username string) *authpb.User {
|
func getUser(tx backend.BatchTx, username string) *authpb.User {
|
||||||
|
|
@@ -745,7 +888,7 @@ func (as *authStore) isAuthEnabled() bool {
 	return as.enabled
 }

-func NewAuthStore(be backend.Backend) *authStore {
+func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore {
 	tx := be.BatchTx()
 	tx.Lock()

@@ -753,13 +896,35 @@ func NewAuthStore(be backend.Backend) *authStore {
 	tx.UnsafeCreateBucket(authUsersBucketName)
 	tx.UnsafeCreateBucket(authRolesBucketName)

+	enabled := false
+	_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
+	if len(vs) == 1 {
+		if bytes.Equal(vs[0], authEnabled) {
+			enabled = true
+		}
+	}
+
+	as := &authStore{
+		be:             be,
+		simpleTokens:   make(map[string]string),
+		revision:       getRevision(tx),
+		indexWaiter:    indexWaiter,
+		enabled:        enabled,
+		rangePermCache: make(map[string]*unifiedRangePermissions),
+	}
+
+	if enabled {
+		as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
+	}
+
+	if as.revision == 0 {
+		as.commitRevision(tx)
+	}
+
 	tx.Unlock()
 	be.ForceCommit()

-	return &authStore{
-		be:           be,
-		simpleTokens: make(map[string]string),
-	}
+	return as
 }

 func hasRootRole(u *authpb.User) bool {

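Note: NewAuthStore now takes an index waiter along with the backend; isValidSimpleToken (added below) uses it to block until the server has applied the Raft index embedded in a simple token. A hedged construction sketch (the always-ready waiter is a placeholder for illustration; etcdserver passes a channel tied to its real apply wait):

```go
// alwaysReady pretends every index has already been applied.
alwaysReady := func(index uint64) <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}
as := auth.NewAuthStore(be, alwaysReady) // be is an existing backend.Backend
defer as.Close()
```
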
@@ -770,3 +935,67 @@ func hasRootRole(u *authpb.User) bool {
 	}
 	return false
 }
+
+func (as *authStore) commitRevision(tx backend.BatchTx) {
+	as.revision++
+	revBytes := make([]byte, revBytesLen)
+	binary.BigEndian.PutUint64(revBytes, as.revision)
+	tx.UnsafePut(authBucketName, revisionKey, revBytes)
+}
+
+func getRevision(tx backend.BatchTx) uint64 {
+	_, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
+	if len(vs) != 1 {
+		// this can happen in the initialization phase
+		return 0
+	}
+
+	return binary.BigEndian.Uint64(vs[0])
+}
+
+func (as *authStore) Revision() uint64 {
+	return as.revision
+}
+
+func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool {
+	splitted := strings.Split(token, ".")
+	if len(splitted) != 2 {
+		return false
+	}
+	index, err := strconv.Atoi(splitted[1])
+	if err != nil {
+		return false
+	}
+
+	select {
+	case <-as.indexWaiter(uint64(index)):
+		return true
+	case <-ctx.Done():
+	}
+
+	return false
+}
+
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+	md, ok := metadata.FromContext(ctx)
+	if !ok {
+		return nil, nil
+	}
+
+	ts, tok := md["token"]
+	if !tok {
+		return nil, nil
+	}
+
+	token := ts[0]
+	if !as.isValidSimpleToken(token, ctx) {
+		return nil, ErrInvalidAuthToken
+	}
+
+	authInfo, uok := as.AuthInfoFromToken(token)
+	if !uok {
+		plog.Warningf("invalid auth token: %s", token)
+		return nil, ErrInvalidAuthToken
+	}
+	return authInfo, nil
+}

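Note: AuthInfoFromCtx above pulls the token out of gRPC metadata under the "token" key, validates its index suffix, and refreshes its TTL via AuthInfoFromToken. A hedged client-side sketch of how such a context is built with the metadata API this code imports (the helper name is illustrative):

```go
// attachToken returns a context carrying token where AuthInfoFromCtx
// expects it: in gRPC metadata under the "token" key.
func attachToken(ctx context.Context, token string) context.Context {
	return metadata.NewContext(ctx, metadata.Pairs("token", token))
}
```
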
@@ -114,4 +114,4 @@ if err != nil {

 3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.

-4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor experimental /health endpoint for member healthy information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265).
+4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
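For reference, a minimal sketch (not part of this commit) of the quorum-read workaround that the updated item 4 recommends; the endpoint and key are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	// Quorum forces the read through the leader, so an isolated member
	// cannot serve stale data.
	resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.Node.Value)
}
```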
@@ -22,7 +22,6 @@ import (
 	"net"
 	"net/http"
 	"net/url"
-	"reflect"
 	"sort"
 	"strconv"
 	"sync"

@@ -261,53 +260,67 @@ type httpClusterClient struct {
 	selectionMode EndpointSelectionMode
 }

-func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
-	mAPI := NewMembersAPI(c)
-	leader, err := mAPI.Leader(context.Background())
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+	ceps := make([]url.URL, len(eps))
+	copy(ceps, eps)
+
+	// To perform a lookup on the new endpoint list without using the current
+	// client, we'll copy it
+	clientCopy := &httpClusterClient{
+		clientFactory: c.clientFactory,
+		credentials:   c.credentials,
+		rand:          c.rand,
+
+		pinned:    0,
+		endpoints: ceps,
+	}
+
+	mAPI := NewMembersAPI(clientCopy)
+	leader, err := mAPI.Leader(ctx)
 	if err != nil {
 		return "", err
 	}
+	if len(leader.ClientURLs) == 0 {
+		return "", ErrNoLeaderEndpoint
+	}
+
 	return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
 }

-func (c *httpClusterClient) SetEndpoints(eps []string) error {
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
 	if len(eps) == 0 {
-		return ErrNoEndpoints
+		return []url.URL{}, ErrNoEndpoints
 	}

 	neps := make([]url.URL, len(eps))
 	for i, ep := range eps {
 		u, err := url.Parse(ep)
 		if err != nil {
-			return err
+			return []url.URL{}, err
 		}
 		neps[i] = *u
 	}
+	return neps, nil
+}

-	switch c.selectionMode {
-	case EndpointSelectionRandom:
-		c.endpoints = shuffleEndpoints(c.rand, neps)
-		c.pinned = 0
-	case EndpointSelectionPrioritizeLeader:
-		c.endpoints = neps
-		lep, err := c.getLeaderEndpoint()
-		if err != nil {
-			return ErrNoLeaderEndpoint
-		}
-
-		for i := range c.endpoints {
-			if c.endpoints[i].String() == lep {
-				c.pinned = i
-				break
-			}
-		}
-		// If endpoints doesn't have the lu, just keep c.pinned = 0.
-		// Forwarding between follower and leader would be required but it works.
-	default:
-		return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
 	}

+	c.Lock()
+	defer c.Unlock()
+
+	c.endpoints = shuffleEndpoints(c.rand, neps)
+	// We're not doing anything for PrioritizeLeader here. This is
+	// due to not having a context meaning we can't call getLeaderEndpoint
+	// However, if you're using PrioritizeLeader, you've already been told
+	// to regularly call sync, where we do have a ctx, and can figure the
+	// leader. PrioritizeLeader is also quite a loose guarantee, so deal
+	// with it
+	c.pinned = 0
+
 	return nil
 }

@@ -401,27 +414,51 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
 		return err
 	}

-	c.Lock()
-	defer c.Unlock()
-
-	eps := make([]string, 0)
+	var eps []string
 	for _, m := range ms {
 		eps = append(eps, m.ClientURLs...)
 	}
-	sort.Sort(sort.StringSlice(eps))

-	ceps := make([]string, len(c.endpoints))
-	for i, cep := range c.endpoints {
-		ceps[i] = cep.String()
-	}
-	sort.Sort(sort.StringSlice(ceps))
-	// fast path if no change happens
-	// this helps client to pin the endpoint when no cluster change
-	if reflect.DeepEqual(eps, ceps) {
-		return nil
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
 	}

-	return c.SetEndpoints(eps)
+	npin := 0
+
+	switch c.selectionMode {
+	case EndpointSelectionRandom:
+		c.RLock()
+		eq := endpointsEqual(c.endpoints, neps)
+		c.RUnlock()
+
+		if eq {
+			return nil
+		}
+		// When items in the endpoint list changes, we choose a new pin
+		neps = shuffleEndpoints(c.rand, neps)
+	case EndpointSelectionPrioritizeLeader:
+		nle, err := c.getLeaderEndpoint(ctx, neps)
+		if err != nil {
+			return ErrNoLeaderEndpoint
+		}
+
+		for i, n := range neps {
+			if n.String() == nle {
+				npin = i
+				break
+			}
+		}
+	default:
+		return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+	}
+
+	c.Lock()
+	defer c.Unlock()
+	c.endpoints = neps
+	c.pinned = npin
+
+	return nil
 }

 func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {

@@ -607,3 +644,27 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
 	}
 	return neps
 }
+
+func endpointsEqual(left, right []url.URL) bool {
+	if len(left) != len(right) {
+		return false
+	}
+
+	sLeft := make([]string, len(left))
+	sRight := make([]string, len(right))
+	for i, l := range left {
+		sLeft[i] = l.String()
+	}
+	for i, r := range right {
+		sRight[i] = r.String()
+	}
+
+	sort.Strings(sLeft)
+	sort.Strings(sRight)
+	for i := range sLeft {
+		if sLeft[i] != sRight[i] {
+			return false
+		}
+	}
+	return true
+}
@@ -21,7 +21,11 @@ type ClusterError struct {
 }

 func (ce *ClusterError) Error() string {
-	return ErrClusterUnavailable.Error()
+	s := ErrClusterUnavailable.Error()
+	for i, e := range ce.Errors {
+		s += fmt.Sprintf("; error #%d: %s\n", i, e)
+	}
+	return s
 }

 func (ce *ClusterError) Detail() string {
File diff suppressed because it is too large
@@ -191,6 +191,10 @@ type SetOptions struct {
 	// Dir specifies whether or not this Node should be created as a directory.
 	Dir bool
+
+	// NoValueOnSuccess specifies whether the response contains the current value of the Node.
+	// If set, the response will only contain the current value when the request fails.
+	NoValueOnSuccess bool
 }

 type GetOptions struct {

@@ -268,6 +272,10 @@ type Response struct {
 	// Index holds the cluster-level index at the time the Response was generated.
 	// This index is not tied to the Node(s) contained in this Response.
 	Index uint64 `json:"-"`
+
+	// ClusterID holds the cluster-level ID reported by the server. This
+	// should be different for different etcd clusters.
+	ClusterID string `json:"-"`
 }

 type Node struct {

@@ -335,6 +343,7 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
 		act.TTL = opts.TTL
 		act.Refresh = opts.Refresh
 		act.Dir = opts.Dir
+		act.NoValueOnSuccess = opts.NoValueOnSuccess
 	}

 	doCtx := ctx

@@ -523,15 +532,16 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
 }

 type setAction struct {
 	Prefix    string
 	Key       string
 	Value     string
 	PrevValue string
 	PrevIndex uint64
 	PrevExist PrevExistType
 	TTL       time.Duration
 	Refresh   bool
 	Dir       bool
+	NoValueOnSuccess bool
 }

 func (a *setAction) HTTPRequest(ep url.URL) *http.Request {

@@ -565,6 +575,9 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
 	if a.Refresh {
 		form.Add("refresh", "true")
 	}
+	if a.NoValueOnSuccess {
+		params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
+	}

 	u.RawQuery = params.Encode()
 	body := strings.NewReader(form.Encode())

@@ -656,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
 			return nil, err
 		}
 	}
+	res.ClusterID = header.Get("X-Etcd-Cluster-ID")
 	return &res, nil
 }
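A short usage sketch (not part of this commit) of the NoValueOnSuccess option introduced above, reusing the v2 client setup from the earlier sketch; the key and value are illustrative:

```go
// kapi is a client.KeysAPI as constructed in the previous sketch.
// With NoValueOnSuccess set, a successful response omits the stored
// value, which trims the response size for large values.
resp, err := kapi.Set(context.Background(), "/foo", "bar", &client.SetOptions{
	NoValueOnSuccess: true,
})
if err != nil {
	log.Fatal(err)
}
log.Println(resp.Action) // the value is intentionally absent on success
```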
@@ -14,6 +14,20 @@

 package client

+import (
+	"regexp"
+)
+
+var (
+	roleNotFoundRegExp *regexp.Regexp
+	userNotFoundRegExp *regexp.Regexp
+)
+
+func init() {
+	roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
+	userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
+}
+
 // IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
 func IsKeyNotFound(err error) bool {
 	if cErr, ok := err.(Error); ok {

@@ -21,3 +35,19 @@ func IsKeyNotFound(err error) bool {
 	}
 	return false
 }
+
+// IsRoleNotFound returns true if the error means role not found of v2 API.
+func IsRoleNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return roleNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}
+
+// IsUserNotFound returns true if the error means user not found of v2 API.
+func IsUserNotFound(err error) bool {
+	if ae, ok := err.(authError); ok {
+		return userNotFoundRegExp.MatchString(ae.Message)
+	}
+	return false
+}
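A hedged sketch (not part of this commit) of how the new helpers might be used to create a user only when it does not already exist; the API handle, user name, and password are illustrative:

```go
// c is a client.Client as constructed earlier.
uapi := client.NewAuthUserAPI(c)
if _, err := uapi.GetUser(context.Background(), "alice"); client.IsUserNotFound(err) {
	// First use: create the user instead of failing.
	if err := uapi.AddUser(context.Background(), "alice", "secret"); err != nil {
		log.Fatal(err)
	}
}
```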
@@ -72,6 +72,10 @@ if err != nil {
 }
 ```

+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
+
 ## Examples

 More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
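A minimal sketch (not part of this commit) of scraping those client metrics; go-grpc-prometheus publishes on the default Prometheus registry, so exposing them is one handler away (the port is illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// The gRPC client interceptors installed by the etcd client record
	// their counters on the default registry; just serve it over HTTP.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```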
@@ -43,6 +43,7 @@ type (
 	AuthRoleListResponse pb.AuthRoleListResponse

 	PermissionType authpb.Permission_Type
+	Permission     authpb.Permission
 )

 const (

@@ -115,12 +116,12 @@ func NewAuth(c *Client) Auth {
 }

 func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
-	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
 	return (*AuthEnableResponse)(resp), toErr(ctx, err)
 }

 func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
-	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
 	return (*AuthDisableResponse)(resp), toErr(ctx, err)
 }
@@ -21,8 +21,14 @@ import (

 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 )

+// ErrNoAddrAvilable is returned by Get() when the balancer does not have
+// any active connection to endpoints at the time.
+// This error is returned only when opts.BlockingWait is true.
+var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
+
 // simpleBalancer does the bare minimum to expose multiple eps
 // to the grpc reconnection code path
 type simpleBalancer struct {

@@ -42,6 +48,11 @@ type simpleBalancer struct {
 	// upc closes when upEps transitions from empty to non-zero or the balancer closes.
 	upc chan struct{}

+	// grpc issues TLS cert checks using the string passed into dial so
+	// that string must be the host. To recover the full scheme://host URL,
+	// have a map from hosts to the original endpoint.
+	host2ep map[string]string
+
 	// pinAddr is the currently pinned address; set to the empty string on
 	// intialization and shutdown.
 	pinAddr string

@@ -62,11 +73,12 @@ func newSimpleBalancer(eps []string) *simpleBalancer {
 		readyc:  make(chan struct{}),
 		upEps:   make(map[string]struct{}),
 		upc:     make(chan struct{}),
+		host2ep: getHost2ep(eps),
 	}
 	return sb
 }

-func (b *simpleBalancer) Start(target string) error { return nil }
+func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }

 func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
 	b.mu.Lock()

@@ -74,6 +86,49 @@ func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
 	return b.upc
 }

+func (b *simpleBalancer) getEndpoint(host string) string {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.host2ep[host]
+}
+
+func getHost2ep(eps []string) map[string]string {
+	hm := make(map[string]string, len(eps))
+	for i := range eps {
+		_, host, _ := parseEndpoint(eps[i])
+		hm[host] = eps[i]
+	}
+	return hm
+}
+
+func (b *simpleBalancer) updateAddrs(eps []string) {
+	np := getHost2ep(eps)
+
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	match := len(np) == len(b.host2ep)
+	for k, v := range np {
+		if b.host2ep[k] != v {
+			match = false
+			break
+		}
+	}
+	if match {
+		// same endpoints, so no need to update address
+		return
+	}
+
+	b.host2ep = np
+
+	addrs := make([]grpc.Address, 0, len(eps))
+	for i := range eps {
+		addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
+	}
+	b.addrs = addrs
+	b.notifyCh <- addrs
+}
+
 func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
 	b.mu.Lock()
 	defer b.mu.Unlock()

@@ -113,6 +168,25 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {

 func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
 	var addr string
+
+	// If opts.BlockingWait is false (for fail-fast RPCs), it should return
+	// an address it has notified via Notify immediately instead of blocking.
+	if !opts.BlockingWait {
+		b.mu.RLock()
+		closed := b.closed
+		addr = b.pinAddr
+		upEps := len(b.upEps)
+		b.mu.RUnlock()
+		if closed {
+			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
+		}
+
+		if upEps == 0 {
+			return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
+		}
+		return grpc.Address{Addr: addr}, func() {}, nil
+	}
+
 	for {
 		b.mu.RLock()
 		ch := b.upc
@@ -18,17 +18,18 @@ import (
 	"crypto/tls"
 	"errors"
 	"fmt"
-	"io/ioutil"
-	"log"
 	"net"
 	"net/url"
 	"strings"
+	"sync"
 	"time"

 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/metadata"
 )

@@ -46,11 +47,12 @@ type Client struct {
 	Auth
 	Maintenance

 	conn             *grpc.ClientConn
 	cfg              Config
 	creds            *credentials.TransportCredentials
 	balancer         *simpleBalancer
 	retryWrapper     retryRpcFunc
+	retryAuthWrapper retryRpcFunc

 	ctx    context.Context
 	cancel context.CancelFunc

@@ -59,6 +61,8 @@ type Client struct {
 	Username string
 	// Password is a password for authentication
 	Password string
+	// tokenCred is an instance of WithPerRPCCredentials()'s argument
+	tokenCred *authTokenCredential
 }

 // New creates a new etcdv3 client from a given configuration.

@@ -87,6 +91,8 @@ func NewFromConfigFile(path string) (*Client, error) {
 // Close shuts down the client's etcd connections.
 func (c *Client) Close() error {
 	c.cancel()
+	c.Watcher.Close()
+	c.Lease.Close()
 	return toErr(c.ctx, c.conn.Close())
 }

@@ -96,10 +102,54 @@ func (c *Client) Close() error {
 func (c *Client) Ctx() context.Context { return c.ctx }

 // Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
+func (c *Client) Endpoints() (eps []string) {
+	// copy the slice; protect original endpoints from being changed
+	eps = make([]string, len(c.cfg.Endpoints))
+	copy(eps, c.cfg.Endpoints)
+	return
+}
+
+// SetEndpoints updates client's endpoints.
+func (c *Client) SetEndpoints(eps ...string) {
+	c.cfg.Endpoints = eps
+	c.balancer.updateAddrs(eps)
+}
+
+// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
+func (c *Client) Sync(ctx context.Context) error {
+	mresp, err := c.MemberList(ctx)
+	if err != nil {
+		return err
+	}
+	var eps []string
+	for _, m := range mresp.Members {
+		eps = append(eps, m.ClientURLs...)
+	}
+	c.SetEndpoints(eps...)
+	return nil
+}
+
+func (c *Client) autoSync() {
+	if c.cfg.AutoSyncInterval == time.Duration(0) {
+		return
+	}
+
+	for {
+		select {
+		case <-c.ctx.Done():
+			return
+		case <-time.After(c.cfg.AutoSyncInterval):
+			ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
+			if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
+				logger.Println("Auto sync endpoints failed:", err)
+			}
+		}
+	}
+}

 type authTokenCredential struct {
 	token   string
+	tokenMu *sync.RWMutex
 }

 func (cred authTokenCredential) RequireTransportSecurity() bool {

@@ -107,24 +157,38 @@ func (cred authTokenCredential) RequireTransportSecurity() bool {
 }

 func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+	cred.tokenMu.RLock()
+	defer cred.tokenMu.RUnlock()
 	return map[string]string{
 		"token": cred.token,
 	}, nil
 }

-func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) {
+func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
 	proto = "tcp"
 	host = endpoint
-	creds = c.creds
 	url, uerr := url.Parse(endpoint)
 	if uerr != nil || !strings.Contains(endpoint, "://") {
 		return
 	}
+	scheme = url.Scheme
+
 	// strip scheme:// prefix since grpc dials by host
 	host = url.Host
 	switch url.Scheme {
+	case "http", "https":
 	case "unix":
 		proto = "unix"
+	default:
+		proto, host = "", ""
+	}
+	return
+}
+
+func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
+	creds = c.creds
+	switch scheme {
+	case "unix":
 	case "http":
 		creds = nil
 	case "https":

@@ -135,7 +199,7 @@ func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *
 		emptyCreds := credentials.NewTLS(tlsconfig)
 		creds = &emptyCreds
 	default:
-		return "", "", nil
+		creds = nil
 	}
 	return
 }

@@ -147,17 +211,8 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
 	}
 	opts = append(opts, dopts...)

-	// grpc issues TLS cert checks using the string passed into dial so
-	// that string must be the host. To recover the full scheme://host URL,
-	// have a map from hosts to the original endpoint.
-	host2ep := make(map[string]string)
-	for i := range c.cfg.Endpoints {
-		_, host, _ := c.dialTarget(c.cfg.Endpoints[i])
-		host2ep[host] = c.cfg.Endpoints[i]
-	}
-
 	f := func(host string, t time.Duration) (net.Conn, error) {
-		proto, host, _ := c.dialTarget(host2ep[host])
+		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
 		if proto == "" {
 			return nil, fmt.Errorf("unknown scheme for %q", host)
 		}

@@ -166,11 +221,15 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
 			return nil, c.ctx.Err()
 		default:
 		}
-		return net.DialTimeout(proto, host, t)
+		dialer := &net.Dialer{Timeout: t}
+		return dialer.DialContext(c.ctx, proto, host)
 	}
 	opts = append(opts, grpc.WithDialer(f))

-	_, _, creds := c.dialTarget(endpoint)
+	creds := c.creds
+	if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
+		creds = c.processCreds(scheme)
+	}
 	if creds != nil {
 		opts = append(opts, grpc.WithTransportCredentials(*creds))
 	} else {

@@ -185,24 +244,56 @@ func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
 	return c.dial(endpoint)
 }

+func (c *Client) getToken(ctx context.Context) error {
+	var err error // return last error in a case of fail
+	var auth *authenticator
+
+	for i := 0; i < len(c.cfg.Endpoints); i++ {
+		endpoint := c.cfg.Endpoints[i]
+		host := getHost(endpoint)
+		// use dial options without dopts to avoid reusing the client balancer
+		auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
+		if err != nil {
+			continue
+		}
+		defer auth.close()
+
+		var resp *AuthenticateResponse
+		resp, err = auth.authenticate(ctx, c.Username, c.Password)
+		if err != nil {
+			continue
+		}
+
+		c.tokenCred.tokenMu.Lock()
+		c.tokenCred.token = resp.Token
+		c.tokenCred.tokenMu.Unlock()
+
+		return nil
+	}
+
+	return err
+}
+
 func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
 	opts := c.dialSetupOpts(endpoint, dopts...)
 	host := getHost(endpoint)
 	if c.Username != "" && c.Password != "" {
-		// use dial options without dopts to avoid reusing the client balancer
-		auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
-		if err != nil {
-			return nil, err
-		}
-		defer auth.close()
-
-		resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
+		c.tokenCred = &authTokenCredential{
+			tokenMu: &sync.RWMutex{},
+		}
+
+		err := c.getToken(context.TODO())
 		if err != nil {
 			return nil, err
 		}
-		opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
+
+		opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
 	}

+	// add metrics options
+	opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
+	opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))
+
 	conn, err := grpc.Dial(host, opts...)
 	if err != nil {
 		return nil, err

@@ -248,6 +339,7 @@ func newClient(cfg *Config) (*Client, error) {
 	}
 	client.conn = conn
 	client.retryWrapper = client.newRetryWrapper()
+	client.retryAuthWrapper = client.newAuthRetryWrapper()

 	// wait for a connection
 	if cfg.DialTimeout > 0 {

@@ -272,13 +364,8 @@ func newClient(cfg *Config) (*Client, error) {
 	client.Watcher = NewWatcher(client)
 	client.Auth = NewAuth(client)
 	client.Maintenance = NewMaintenance(client)
-	if cfg.Logger != nil {
-		logger.Set(cfg.Logger)
-	} else {
-		// disable client side grpc by default
-		logger.Set(log.New(ioutil.Discard, "", 0))
-	}

+	go client.autoSync()
 	return client, nil
 }

@@ -294,17 +381,14 @@ func isHaltErr(ctx context.Context, err error) bool {
 	if err == nil {
 		return false
 	}
-	eErr := rpctypes.Error(err)
-	if _, ok := eErr.(rpctypes.EtcdError); ok {
-		return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader
-	}
-	// treat etcdserver errors not recognized by the client as halting
-	return isConnClosing(err) || strings.Contains(err.Error(), "etcdserver:")
-}
-
-// isConnClosing returns true if the error matches a grpc client closing error
-func isConnClosing(err error) bool {
-	return strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error())
+	code := grpc.Code(err)
+	// Unavailable codes mean the system will be right back.
+	// (e.g., can't connect, lost leader)
+	// Treat Internal codes as if something failed, leaving the
+	// system in an inconsistent state, but retrying could make progress.
+	// (e.g., failed in middle of send, corrupted frame)
+	// TODO: are permanent Internal errors possible from grpc?
+	return code != codes.Unavailable && code != codes.Internal
 }

 func toErr(ctx context.Context, err error) error {

@@ -312,12 +396,20 @@ func toErr(ctx context.Context, err error) error {
 		return nil
 	}
 	err = rpctypes.Error(err)
-	switch {
-	case ctx.Err() != nil && strings.Contains(err.Error(), "context"):
-		err = ctx.Err()
-	case strings.Contains(err.Error(), ErrNoAvailableEndpoints.Error()):
+	if _, ok := err.(rpctypes.EtcdError); ok {
+		return err
+	}
+	code := grpc.Code(err)
+	switch code {
+	case codes.DeadlineExceeded:
+		fallthrough
+	case codes.Canceled:
+		if ctx.Err() != nil {
+			err = ctx.Err()
+		}
+	case codes.Unavailable:
 		err = ErrNoAvailableEndpoints
-	case strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()):
+	case codes.FailedPrecondition:
 		err = grpc.ErrClientConnClosing
 	}
 	return err
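The new Endpoints/SetEndpoints/Sync surface above can be exercised roughly as in this sketch (not part of this commit; the addresses are illustrative):

```go
cli, err := clientv3.New(clientv3.Config{
	Endpoints:   []string{"localhost:2379"},
	DialTimeout: 5 * time.Second,
})
if err != nil {
	log.Fatal(err)
}
defer cli.Close()

// Swap the endpoint set at runtime; the balancer picks up the change.
cli.SetEndpoints("localhost:22379", "localhost:32379")

// Or pull the current member list straight from the cluster.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
err = cli.Sync(ctx)
cancel()
if err != nil {
	log.Fatal(err)
}
```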
@@ -78,7 +78,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
 	// it is safe to retry on update.
 	for {
 		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
-		resp, err := c.remote.MemberUpdate(ctx, r)
+		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
 		if err == nil {
 			return (*MemberUpdateResponse)(resp), nil
 		}
@@ -36,6 +36,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
 	switch result {
 	case "=":
 		r = pb.Compare_EQUAL
+	case "!=":
+		r = pb.Compare_NOT_EQUAL
 	case ">":
 		r = pb.Compare_GREATER
 	case "<":
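A sketch (not part of this commit) of the newly supported "!=" comparison inside a transaction, assuming a clientv3.Client named cli; the key and value are illustrative:

```go
// Put "k" only if its current value is not already "v".
_, err := cli.Txn(context.Background()).
	If(clientv3.Compare(clientv3.Value("k"), "!=", "v")).
	Then(clientv3.OpPut("k", "v")).
	Commit()
if err != nil {
	log.Fatal(err)
}
```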
@@ -28,15 +28,16 @@ type Config struct {
 	// Endpoints is a list of URLs
 	Endpoints []string

+	// AutoSyncInterval is the interval to update endpoints with its latest members.
+	// 0 disables auto-sync. By default auto-sync is disabled.
+	AutoSyncInterval time.Duration
+
 	// DialTimeout is the timeout for failing to establish a connection.
 	DialTimeout time.Duration

 	// TLS holds the client secure credentials, if any.
 	TLS *tls.Config

-	// Logger is the logger used by client library.
-	Logger Logger
-
 	// Username is a username for authentication
 	Username string

@@ -46,6 +47,7 @@ type Config struct {

 type yamlConfig struct {
 	Endpoints             []string      `json:"endpoints"`
+	AutoSyncInterval      time.Duration `json:"auto-sync-interval"`
 	DialTimeout           time.Duration `json:"dial-timeout"`
 	InsecureTransport     bool          `json:"insecure-transport"`
 	InsecureSkipTLSVerify bool          `json:"insecure-skip-tls-verify"`

@@ -68,8 +70,9 @@ func configFromFile(fpath string) (*Config, error) {
 	}

 	cfg := &Config{
 		Endpoints:        yc.Endpoints,
-		DialTimeout: yc.DialTimeout,
+		AutoSyncInterval: yc.AutoSyncInterval,
+		DialTimeout:      yc.DialTimeout,
 	}

 	if yc.InsecureTransport {
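Enabling the new auto-sync from application code would look roughly like this sketch (not part of this commit; the values are illustrative):

```go
cli, err := clientv3.New(clientv3.Config{
	Endpoints:        []string{"localhost:2379"},
	AutoSyncInterval: time.Minute, // refresh endpoints from the cluster once a minute
	DialTimeout:      5 * time.Second,
})
if err != nil {
	log.Fatal(err)
}
defer cli.Close()
```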
@@ -44,7 +44,7 @@
 // etcd client returns 2 types of errors:
 //
 // 1. context error: canceled or deadline exceeded.
-// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
+// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
 //
 // Here is the example code to handle client errors:
 //
@@ -85,6 +85,10 @@ func NewKV(c *Client) KV {
 	return &kv{remote: RetryKVClient(c)}
 }

+func NewKVFromKVClient(remote pb.KVClient) KV {
+	return &kv{remote: remote}
+}
+
 func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
 	r, err := kv.Do(ctx, OpPut(key, val, opts...))
 	return r.put, toErr(ctx, err)

@@ -101,7 +105,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
 }

 func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
-	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
+	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
 	if err != nil {
 		return nil, toErr(ctx, err)
 	}

@@ -121,6 +125,7 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
 		if err == nil {
 			return resp, nil
 		}
+
 		if isHaltErr(ctx, err) {
 			return resp, toErr(ctx, err)
 		}

@@ -137,21 +142,7 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
 	// TODO: handle other ops
 	case tRange:
 		var resp *pb.RangeResponse
-		r := &pb.RangeRequest{
-			Key:          op.key,
-			RangeEnd:     op.end,
-			Limit:        op.limit,
-			Revision:     op.rev,
-			Serializable: op.serializable,
-			KeysOnly:     op.keysOnly,
-			CountOnly:    op.countOnly,
-		}
-		if op.sort != nil {
-			r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
-			r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
-		}
-
-		resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false))
+		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
 		if err == nil {
 			return OpResponse{get: (*GetResponse)(resp)}, nil
 		}
@@ -44,6 +44,21 @@ type LeaseKeepAliveResponse struct {
 	TTL int64
 }

+// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
+type LeaseTimeToLiveResponse struct {
+	*pb.ResponseHeader
+	ID LeaseID `json:"id"`
+
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+	TTL int64 `json:"ttl"`
+
+	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+	GrantedTTL int64 `json:"granted-ttl"`
+
+	// Keys is the list of keys attached to this lease.
+	Keys [][]byte `json:"keys"`
+}
+
 const (
 	// defaultTTL is the assumed lease TTL used for the first keepalive
 	// deadline before the actual TTL is known to the client.

@@ -54,6 +69,21 @@ const (
 	NoLease LeaseID = 0
 )

+// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
+//
+// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
+type ErrKeepAliveHalted struct {
+	Reason error
+}
+
+func (e ErrKeepAliveHalted) Error() string {
+	s := "etcdclient: leases keep alive halted"
+	if e.Reason != nil {
+		s += ": " + e.Reason.Error()
+	}
+	return s
+}
+
 type Lease interface {
 	// Grant creates a new lease.
 	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

@@ -61,6 +91,9 @@ type Lease interface {
 	// Revoke revokes the given lease.
 	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

+	// TimeToLive retrieves the lease information of the given lease ID.
+	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
+
 	// KeepAlive keeps the given lease alive forever.
 	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

@@ -76,8 +109,9 @@ type Lease interface {
 type lessor struct {
 	mu sync.Mutex // guards all fields

-	// donec is closed when recvKeepAliveLoop stops
+	// donec is closed and loopErr is set when recvKeepAliveLoop stops
 	donec   chan struct{}
+	loopErr error

 	remote pb.LeaseClient

@@ -141,7 +175,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
 			return gresp, nil
 		}
 		if isHaltErr(cctx, err) {
-			return nil, toErr(ctx, err)
+			return nil, toErr(cctx, err)
 		}
 	}
 }

@@ -164,10 +198,43 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
 	}
 }

+func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
+	cctx, cancel := context.WithCancel(ctx)
+	done := cancelWhenStop(cancel, l.stopCtx.Done())
+	defer close(done)
+
+	for {
+		r := toLeaseTimeToLiveRequest(id, opts...)
+		resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false))
+		if err == nil {
+			gresp := &LeaseTimeToLiveResponse{
+				ResponseHeader: resp.GetHeader(),
+				ID:             LeaseID(resp.ID),
+				TTL:            resp.TTL,
+				GrantedTTL:     resp.GrantedTTL,
+				Keys:           resp.Keys,
+			}
+			return gresp, nil
+		}
+		if isHaltErr(cctx, err) {
+			return nil, toErr(cctx, err)
+		}
+	}
+}
+
 func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
 	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)

 	l.mu.Lock()
+	// ensure that recvKeepAliveLoop is still running
+	select {
+	case <-l.donec:
+		err := l.loopErr
+		l.mu.Unlock()
+		close(ch)
+		return ch, ErrKeepAliveHalted{Reason: err}
+	default:
+	}
 	ka, ok := l.keepAlives[id]
 	if !ok {
 		// create fresh keep alive

@@ -275,10 +342,11 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
 	return karesp, nil
 }

-func (l *lessor) recvKeepAliveLoop() {
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
 	defer func() {
 		l.mu.Lock()
 		close(l.donec)
+		l.loopErr = gerr
 		for _, ka := range l.keepAlives {
 			ka.Close()
 		}

@@ -291,13 +359,14 @@
 		resp, err := stream.Recv()
 		if err != nil {
 			if isHaltErr(l.stopCtx, err) {
-				return
+				return err
 			}
 			stream, serr = l.resetRecv()
 			continue
 		}
 		l.recvKeepAlive(resp)
 	}
+	return serr
 }

 // resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests

@@ -347,7 +416,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
 	}

 	// send update to all channels
-	nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
+	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
 	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
 	for _, ch := range ka.chs {
 		select {

@@ -393,7 +462,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
 			return
 		}

-		tosend := make([]LeaseID, 0)
+		var tosend []LeaseID

 		now := time.Now()
 		l.mu.Lock()
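A usage sketch (not part of this commit) for the new TimeToLive call; it assumes a clientv3.Client named cli and the WithAttachedKeys LeaseOption from the same change set, and the TTL value is illustrative:

```go
ctx := context.Background()
gresp, err := cli.Grant(ctx, 10) // a 10-second lease
if err != nil {
	log.Fatal(err)
}
// Ask the server how much of the lease remains and which keys it holds.
ttlresp, err := cli.TimeToLive(ctx, gresp.ID, clientv3.WithAttachedKeys())
if err != nil {
	log.Fatal(err)
}
log.Printf("remaining=%ds granted=%ds keys=%d",
	ttlresp.TTL, ttlresp.GrantedTTL, len(ttlresp.Keys))
```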
@@ -15,13 +15,15 @@
 package clientv3

 import (
+	"io/ioutil"
 	"log"
-	"os"
 	"sync"

 	"google.golang.org/grpc/grpclog"
 )

+// Logger is the logger used by client library.
+// It implements grpclog.Logger interface.
 type Logger grpclog.Logger

 var (

@@ -34,20 +36,36 @@ type settableLogger struct {
 }

 func init() {
-	// use go's standard logger by default like grpc
+	// disable client side logs by default
 	logger.mu.Lock()
-	logger.l = log.New(os.Stderr, "", log.LstdFlags)
+	logger.l = log.New(ioutil.Discard, "", 0)
+
+	// logger has to override the grpclog at initialization so that
+	// any changes to the grpclog go through logger with locking
+	// instead of through SetLogger
+	//
+	// now updates only happen through settableLogger.set
 	grpclog.SetLogger(&logger)
 	logger.mu.Unlock()
 }

-func (s *settableLogger) Set(l Logger) {
+// SetLogger sets client-side Logger. By default, logs are disabled.
+func SetLogger(l Logger) {
+	logger.set(l)
+}
+
+// GetLogger returns the current logger.
+func GetLogger() Logger {
+	return logger.get()
+}
+
+func (s *settableLogger) set(l Logger) {
 	s.mu.Lock()
 	logger.l = l
 	s.mu.Unlock()
 }

-func (s *settableLogger) Get() Logger {
+func (s *settableLogger) get() Logger {
 	s.mu.RLock()
 	l := logger.l
 	s.mu.RUnlock()

@@ -56,9 +74,9 @@ func (s *settableLogger) Get() Logger {

 // implement the grpclog.Logger interface

-func (s *settableLogger) Fatal(args ...interface{})                 { s.Get().Fatal(args...) }
-func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) }
-func (s *settableLogger) Fatalln(args ...interface{})               { s.Get().Fatalln(args...) }
-func (s *settableLogger) Print(args ...interface{})                 { s.Get().Print(args...) }
-func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) }
-func (s *settableLogger) Println(args ...interface{})               { s.Get().Println(args...) }
+func (s *settableLogger) Fatal(args ...interface{})                 { s.get().Fatal(args...) }
+func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
+func (s *settableLogger) Fatalln(args ...interface{})               { s.get().Fatalln(args...) }
+func (s *settableLogger) Print(args ...interface{})                 { s.get().Print(args...) }
+func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
+func (s *settableLogger) Println(args ...interface{})               { s.get().Println(args...) }
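Since client-side logs are now discarded by default, an application that wants them back would opt in via the new SetLogger, roughly as in this sketch (not part of this commit; the prefix is illustrative):

```go
// *log.Logger satisfies the grpclog.Logger-based clientv3.Logger interface.
clientv3.SetLogger(log.New(os.Stderr, "etcd-client: ", log.LstdFlags))
```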
@ -14,9 +14,7 @@
|
||||||
|
|
||||||
package clientv3
|
package clientv3
|
||||||
|
|
||||||
import (
|
import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
|
||||||
)
|
|
||||||
|
|
||||||
type opType int
|
type opType int
|
||||||
|
|
||||||
|
|
@ -43,6 +41,10 @@ type Op struct {
|
||||||
serializable bool
|
serializable bool
|
||||||
keysOnly bool
|
keysOnly bool
|
||||||
countOnly bool
|
countOnly bool
|
||||||
|
minModRev int64
|
||||||
|
maxModRev int64
|
||||||
|
minCreateRev int64
|
||||||
|
maxCreateRev int64
|
||||||
|
|
||||||
// for range, watch
|
// for range, watch
|
||||||
rev int64
|
rev int64
|
||||||
|
|
@ -52,29 +54,45 @@ type Op struct {
|
||||||
|
|
||||||
// progressNotify is for progress updates.
|
// progressNotify is for progress updates.
|
||||||
progressNotify bool
|
progressNotify bool
|
||||||
|
// createdNotify is for created event
|
||||||
|
createdNotify bool
|
||||||
|
// filters for watchers
|
||||||
|
filterPut bool
|
||||||
|
filterDelete bool
|
||||||
|
|
||||||
// for put
|
// for put
|
||||||
val []byte
|
val []byte
|
||||||
leaseID LeaseID
|
leaseID LeaseID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (op Op) toRangeRequest() *pb.RangeRequest {
|
||||||
|
if op.t != tRange {
|
||||||
|
panic("op.t != tRange")
|
||||||
|
}
|
||||||
|
r := &pb.RangeRequest{
|
||||||
|
Key: op.key,
|
||||||
|
RangeEnd: op.end,
|
||||||
|
Limit: op.limit,
|
||||||
|
Revision: op.rev,
|
||||||
|
Serializable: op.serializable,
|
||||||
|
KeysOnly: op.keysOnly,
|
||||||
|
CountOnly: op.countOnly,
|
||||||
|
MinModRevision: op.minModRev,
|
||||||
|
MaxModRevision: op.maxModRev,
|
||||||
|
MinCreateRevision: op.minCreateRev,
|
||||||
|
MaxCreateRevision: op.maxCreateRev,
|
||||||
|
}
|
||||||
|
if op.sort != nil {
|
||||||
|
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
||||||
|
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
func (op Op) toRequestOp() *pb.RequestOp {
|
func (op Op) toRequestOp() *pb.RequestOp {
|
||||||
switch op.t {
|
switch op.t {
|
||||||
case tRange:
|
case tRange:
|
||||||
r := &pb.RangeRequest{
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
|
||||||
Key: op.key,
|
|
||||||
RangeEnd: op.end,
|
|
||||||
Limit: op.limit,
|
|
||||||
Revision: op.rev,
|
|
||||||
Serializable: op.serializable,
|
|
||||||
KeysOnly: op.keysOnly,
|
|
||||||
CountOnly: op.countOnly,
|
|
||||||
}
|
|
||||||
if op.sort != nil {
|
|
||||||
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
|
||||||
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
|
||||||
}
|
|
||||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}}
|
|
||||||
case tPut:
|
case tPut:
|
||||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
||||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
||||||
|
|
@@ -112,6 +130,14 @@ func OpDelete(key string, opts ...OpOption) Op {
 		panic("unexpected serializable in delete")
 	case ret.countOnly:
 		panic("unexpected countOnly in delete")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in delete")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in delete")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in delete")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in delete")
 	}
 	return ret
 }
@@ -131,7 +157,15 @@ func OpPut(key, val string, opts ...OpOption) Op {
 	case ret.serializable:
 		panic("unexpected serializable in put")
 	case ret.countOnly:
-		panic("unexpected countOnly in delete")
+		panic("unexpected countOnly in put")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in put")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in put")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in put")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in put")
 	}
 	return ret
 }
@@ -149,7 +183,11 @@ func opWatch(key string, opts ...OpOption) Op {
 	case ret.serializable:
 		panic("unexpected serializable in watch")
 	case ret.countOnly:
-		panic("unexpected countOnly in delete")
+		panic("unexpected countOnly in watch")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in watch")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in watch")
 	}
 	return ret
 }
@@ -181,6 +219,14 @@ func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
 // 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
 func WithSort(target SortTarget, order SortOrder) OpOption {
 	return func(op *Op) {
+		if target == SortByKey && order == SortAscend {
+			// If order != SortNone, server fetches the entire key-space,
+			// and then applies the sort and limit, if provided.
+			// Since current mvcc.Range implementation returns results
+			// sorted by keys in lexicographically ascending order,
+			// client should ignore SortOrder if the target is SortByKey.
+			order = SortNone
+		}
 		op.sort = &SortOption{target, order}
 	}
 }
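The normalization above is observable from the public API. The following is a minimal sketch, assuming a reachable etcd endpoint at 127.0.0.1:2379 and a made-up key prefix "foo": asking for SortByKey ascending is silently rewritten to SortNone, since ranges already come back in ascending key order.

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// SortByKey + SortAscend is rewritten to SortNone by WithSort above,
	// avoiding a needless full-keyspace sort on the server.
	resp, err := cli.Get(ctx, "foo", clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend))
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}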
@@ -245,6 +291,18 @@ func WithCountOnly() OpOption {
 	return func(op *Op) { op.countOnly = true }
 }
 
+// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
+func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
+
+// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
+func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
+
+// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
 // WithFirstCreate gets the key with the oldest creation revision in the request range.
 func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
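A minimal sketch of the new revision filters, assuming an existing *clientv3.Client; the prefix "job/" and the revision numbers are made-up examples. The filters are applied server-side, so only matching keys cross the wire.

package main

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// recentJobs returns keys under "job/" last modified at revision 100 or
// later and created no later than revision 200.
func recentJobs(ctx context.Context, cli *clientv3.Client) (*clientv3.GetResponse, error) {
	return cli.Get(ctx, "job/",
		clientv3.WithPrefix(),
		clientv3.WithMinModRev(100),
		clientv3.WithMaxCreateRev(200))
}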
@@ -268,7 +326,8 @@ func withTop(target SortTarget, order SortOrder) []OpOption {
 	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
 }
 
-// WithProgressNotify makes watch server send periodic progress updates.
+// WithProgressNotify makes watch server send periodic progress updates
+// every 10 minutes when there is no incoming events.
 // Progress updates have zero events in WatchResponse.
 func WithProgressNotify() OpOption {
 	return func(op *Op) {
@@ -276,6 +335,23 @@ func WithProgressNotify() OpOption {
 	}
 }
 
+// WithCreatedNotify makes watch server sends the created event.
+func WithCreatedNotify() OpOption {
+	return func(op *Op) {
+		op.createdNotify = true
+	}
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+	return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+	return func(op *Op) { op.filterDelete = true }
+}
+
 // WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
 // nothing will be returned.
 func WithPrevKV() OpOption {
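A sketch of the new watch filters, assuming an existing *clientv3.Client and a made-up prefix "conf/". With WithFilterDelete the server drops DELETE events before they reach the stream, so the loop below only ever sees PUTs.

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// watchPuts prints every PUT under "conf/"; DELETEs are filtered server-side.
func watchPuts(ctx context.Context, cli *clientv3.Client) {
	for wresp := range cli.Watch(ctx, "conf/", clientv3.WithPrefix(), clientv3.WithFilterDelete()) {
		for _, ev := range wresp.Events {
			fmt.Printf("PUT %s = %s\n", ev.Kv.Key, ev.Kv.Value)
		}
	}
}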
@@ -283,3 +359,32 @@ func WithPrevKV() OpOption {
 		op.prevKV = true
 	}
 }
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys requests lease timetolive API to return
+// attached keys of given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
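The new LeaseOption plumbing surfaces through the lease TimeToLive call. A sketch under the assumption of an existing *clientv3.Client, with the key "svc/a" invented for illustration:

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func leaseInfo(ctx context.Context, cli *clientv3.Client) error {
	grant, err := cli.Grant(ctx, 30) // 30-second lease
	if err != nil {
		return err
	}
	if _, err = cli.Put(ctx, "svc/a", "up", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}
	// WithAttachedKeys sets Keys=true on the LeaseTimeToLiveRequest, so the
	// response also lists the keys bound to the lease.
	ttl, err := cli.TimeToLive(ctx, grant.ID, clientv3.WithAttachedKeys())
	if err != nil {
		return err
	}
	fmt.Printf("lease %x: %ds remaining, keys=%q\n", int64(ttl.ID), ttl.TTL, ttl.Keys)
	return nil
}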
@@ -15,68 +15,117 @@
 package clientv3
 
 import (
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 )
 
 type rpcFunc func(ctx context.Context) error
-type retryRpcFunc func(context.Context, rpcFunc)
+type retryRpcFunc func(context.Context, rpcFunc) error
 
 func (c *Client) newRetryWrapper() retryRpcFunc {
-	return func(rpcCtx context.Context, f rpcFunc) {
+	return func(rpcCtx context.Context, f rpcFunc) error {
 		for {
 			err := f(rpcCtx)
-			// ignore grpc conn closing on fail-fast calls; they are transient errors
-			if err == nil || !isConnClosing(err) {
-				return
+			if err == nil {
+				return nil
 			}
+
+			eErr := rpctypes.Error(err)
+			// always stop retry on etcd errors
+			if _, ok := eErr.(rpctypes.EtcdError); ok {
+				return err
+			}
+
+			// only retry if unavailable
+			if grpc.Code(err) != codes.Unavailable {
+				return err
+			}
+
 			select {
 			case <-c.balancer.ConnectNotify():
 			case <-rpcCtx.Done():
+				return rpcCtx.Err()
 			case <-c.ctx.Done():
-				return
+				return c.ctx.Err()
 			}
 		}
 	}
 }
 
-type retryKVClient struct {
-	pb.KVClient
-	retryf retryRpcFunc
+func (c *Client) newAuthRetryWrapper() retryRpcFunc {
+	return func(rpcCtx context.Context, f rpcFunc) error {
+		for {
+			err := f(rpcCtx)
+			if err == nil {
+				return nil
+			}
+
+			// always stop retry on etcd errors other than invalid auth token
+			if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
+				gterr := c.getToken(rpcCtx)
+				if gterr != nil {
+					return err // return the original error for simplicity
+				}
+				continue
+			}
+
+			return err
+		}
+	}
 }
 
 // RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
 func RetryKVClient(c *Client) pb.KVClient {
-	return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
+	retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
+	return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
 }
 
-func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+type retryKVClient struct {
+	*retryWriteKVClient
+}
+
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
+		resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+type retryWriteKVClient struct {
+	pb.KVClient
+	retryf retryRpcFunc
+}
+
+func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rkv.KVClient.Put(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }
 
-func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }
 
-func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rkv.KVClient.Txn(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }
 
-func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
-	rkv.retryf(ctx, func(rctx context.Context) error {
+func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+	err = rkv.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rkv.KVClient.Compact(rctx, in, opts...)
 		return err
 	})
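Not the vendored code itself, but a standalone sketch of the retry policy the wrapper above encodes: return immediately on success, on a definite etcd error, or on any non-Unavailable gRPC code, and otherwise wait and retry. The fixed sleep here stands in for the client balancer's reconnect notification.

package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func retryUnavailable(ctx context.Context, f func(context.Context) error) error {
	for {
		err := f(ctx)
		if err == nil {
			return nil
		}
		if grpc.Code(err) != codes.Unavailable {
			return err // permanent failure; surface it to the caller
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(50 * time.Millisecond):
			// stand-in for <-c.balancer.ConnectNotify() in the real wrapper
		}
	}
}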
@@ -90,11 +139,12 @@ type retryLeaseClient struct {
 
 // RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
 func RetryLeaseClient(c *Client) pb.LeaseClient {
-	return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
+	retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
+	return &retryLeaseClient{retry, c.retryAuthWrapper}
 }
 
 func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
-	rlc.retryf(ctx, func(rctx context.Context) error {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
 		return err
 	})

@@ -103,7 +153,7 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
 }
 
 func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
-	rlc.retryf(ctx, func(rctx context.Context) error {
+	err = rlc.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
 		return err
 	})

@@ -121,7 +171,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient {
 }
 
 func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
-	rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
 		return err
 	})

@@ -129,7 +179,7 @@ func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRe
 }
 
 func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
-	rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
 		return err
 	})

@@ -137,7 +187,7 @@ func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRe
 }
 
 func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
-	rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
 		return err
 	})

@@ -155,7 +205,7 @@ func RetryAuthClient(c *Client) pb.AuthClient {
 }
 
 func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
 		return err
 	})

@@ -163,7 +213,7 @@ func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableReq
 }
 
 func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
 		return err
 	})

@@ -171,7 +221,7 @@ func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableR
 }
 
 func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
 		return err
 	})

@@ -179,7 +229,7 @@ func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddReque
 }
 
 func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
 		return err
 	})

@@ -187,7 +237,7 @@ func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDelet
 }
 
 func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
 		return err
 	})

@@ -195,7 +245,7 @@ func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthU
 }
 
 func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
 		return err
 	})

@@ -203,7 +253,7 @@ func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGr
 }
 
 func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
 		return err
 	})

@@ -211,7 +261,7 @@ func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserR
 }
 
 func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
 		return err
 	})

@@ -219,7 +269,7 @@ func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddReque
 }
 
 func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
 		return err
 	})

@@ -227,7 +277,7 @@ func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDelet
 }
 
 func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
 		return err
 	})

@@ -235,7 +285,7 @@ func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.Auth
 }
 
 func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
-	rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.retryf(ctx, func(rctx context.Context) error {
 		resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
 		return err
 	})
@@ -19,6 +19,7 @@ import (
 
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"golang.org/x/net/context"
+	"google.golang.org/grpc"
 )
 
 // Txn is the interface that wraps mini-transactions.

@@ -152,7 +153,12 @@ func (txn *txn) Commit() (*TxnResponse, error) {
 
 func (txn *txn) commit() (*TxnResponse, error) {
 	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
-	resp, err := txn.kv.remote.Txn(txn.ctx, r)
+
+	var opts []grpc.CallOption
+	if !txn.isWrite {
+		opts = []grpc.CallOption{grpc.FailFast(false)}
+	}
+	resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
 	if err != nil {
 		return nil, err
 	}
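The isWrite check above means read-only transactions opt into grpc.FailFast(false), so the call waits for a ready connection instead of failing immediately; that is safe because a read can be reissued. A sketch of the same call option on a raw pb.KVClient, with readOnlyTxn being an invented helper name:

package main

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// readOnlyTxn issues a Txn with fail-fast disabled; only do this for
// requests that are safe to block and reissue, i.e. reads.
func readOnlyTxn(ctx context.Context, kv pb.KVClient, req *pb.TxnRequest) (*pb.TxnResponse, error) {
	return kv.Txn(ctx, req, grpc.FailFast(false))
}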
@@ -61,8 +61,8 @@ type WatchResponse struct {
 	// the channel sends a final response that has Canceled set to true with a non-nil Err().
 	Canceled bool
 
-	// created is used to indicate the creation of the watcher.
-	created bool
+	// Created is used to indicate the creation of the watcher.
+	Created bool
 
 	closeErr error
 }

@@ -92,7 +92,7 @@ func (wr *WatchResponse) Err() error {
 
 // IsProgressNotify returns true if the WatchResponse is progress notification.
 func (wr *WatchResponse) IsProgressNotify() bool {
-	return len(wr.Events) == 0 && !wr.Canceled && !wr.created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
 }
 
 // watcher implements the Watcher interface

@@ -101,6 +101,7 @@ type watcher struct {
 
 	// mu protects the grpc streams map
 	mu sync.RWMutex
 
 	// streams holds all the active grpc streams keyed by ctx value.
 	streams map[string]*watchGrpcStream
 }

@@ -131,6 +132,8 @@ type watchGrpcStream struct {
 	errc chan error
 	// closingc gets the watcherStream of closing watchers
 	closingc chan *watcherStream
+	// wg is Done when all substream goroutines have exited
+	wg sync.WaitGroup
 
 	// resumec closes to signal that all substreams should begin resuming
 	resumec chan struct{}

@@ -144,8 +147,12 @@ type watchRequest struct {
 	key string
 	end string
 	rev int64
-	// progressNotify is for progress updates.
+	// send created notification event if this field is true
+	createdNotify bool
+	// progressNotify is for progress updates
 	progressNotify bool
+	// filters is the list of events to filter out
+	filters []pb.WatchCreateRequest_FilterType
 	// get the previous key-value pair before the event happens
 	prevKV bool
 	// retc receives a chan WatchResponse once the watcher is established

@@ -173,8 +180,12 @@ type watcherStream struct {
 }
 
 func NewWatcher(c *Client) Watcher {
+	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
+}
+
+func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
 	return &watcher{
-		remote:  pb.NewWatchClient(c.conn),
+		remote:  wc,
 		streams: make(map[string]*watchGrpcStream),
 	}
 }

@@ -215,12 +226,22 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
 func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
 	ow := opWatch(key, opts...)
 
+	var filters []pb.WatchCreateRequest_FilterType
+	if ow.filterPut {
+		filters = append(filters, pb.WatchCreateRequest_NOPUT)
+	}
+	if ow.filterDelete {
+		filters = append(filters, pb.WatchCreateRequest_NODELETE)
+	}
+
 	wr := &watchRequest{
 		ctx:            ctx,
+		createdNotify:  ow.createdNotify,
 		key:            string(ow.key),
 		end:            string(ow.end),
 		rev:            ow.rev,
 		progressNotify: ow.progressNotify,
+		filters:        filters,
 		prevKV:         ow.prevKV,
 		retc:           make(chan chan WatchResponse, 1),
 	}
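With Created now exported and createdNotify wired through, a caller can block until the server has actually registered the watcher. A minimal sketch, assuming an existing *clientv3.Client and the made-up key "foo":

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func watchWithHandshake(ctx context.Context, cli *clientv3.Client) {
	wch := cli.Watch(ctx, "foo", clientv3.WithCreatedNotify())
	for wresp := range wch {
		if wresp.Created {
			// one empty response arrives as soon as the watcher exists
			fmt.Println("watcher registered on server")
			continue
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%v %s\n", ev.Type, ev.Kv.Key)
		}
	}
}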
@@ -374,18 +395,20 @@ func (w *watchGrpcStream) run() {
 		for _, ws := range w.substreams {
 			if _, ok := closing[ws]; !ok {
 				close(ws.recvc)
+				closing[ws] = struct{}{}
 			}
 		}
 		for _, ws := range w.resuming {
 			if _, ok := closing[ws]; ws != nil && !ok {
 				close(ws.recvc)
+				closing[ws] = struct{}{}
 			}
 		}
 		w.joinSubstreams()
-		for toClose := len(w.substreams) + len(w.resuming); toClose > 0; toClose-- {
+		for range closing {
 			w.closeSubstream(<-w.closingc)
 		}
+		w.wg.Wait()
 		w.owner.closeStream(w)
 	}()

@@ -410,6 +433,7 @@ func (w *watchGrpcStream) run() {
 			}
 
 			ws.donec = make(chan struct{})
+			w.wg.Add(1)
 			go w.serveSubstream(ws, w.resumec)
 
 			// queue up for watcher creation/resume

@@ -458,7 +482,7 @@ func (w *watchGrpcStream) run() {
 			}
 		// watch client failed to recv; spawn another if possible
 		case err := <-w.errc:
-			if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
+			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
 				closeErr = err
 				return
 			}

@@ -508,7 +532,7 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
 		Header:          *pbresp.Header,
 		Events:          events,
 		CompactRevision: pbresp.CompactRevision,
-		created:         pbresp.Created,
+		Created:         pbresp.Created,
 		Canceled:        pbresp.Canceled,
 	}
 	select {

@@ -555,6 +579,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
 		if !resuming {
 			w.closingc <- ws
 		}
+		w.wg.Done()
 	}()
 
 	emptyWr := &WatchResponse{}

@@ -562,14 +587,6 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
 		curWr := emptyWr
 		outc := ws.outc
 
-		if len(ws.buf) > 0 && ws.buf[0].created {
-			select {
-			case ws.initReq.retc <- ws.outc:
-			default:
-			}
-			ws.buf = ws.buf[1:]
-		}
-
 		if len(ws.buf) > 0 {
 			curWr = ws.buf[0]
 		} else {

@@ -587,13 +604,35 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
 				// shutdown from closeSubstream
 				return
 			}
-			// TODO pause channel if buffer gets too large
-			ws.buf = append(ws.buf, wr)
+
+			if wr.Created {
+				if ws.initReq.retc != nil {
+					ws.initReq.retc <- ws.outc
+					// to prevent next write from taking the slot in buffered channel
+					// and posting duplicate create events
+					ws.initReq.retc = nil
+
+					// send first creation event only if requested
+					if ws.initReq.createdNotify {
+						ws.outc <- *wr
+					}
+				}
+			}
+
 			nextRev = wr.Header.Revision
 			if len(wr.Events) > 0 {
 				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
 			}
 			ws.initReq.rev = nextRev
+
+			// created event is already sent above,
+			// watcher should not post duplicate events
+			if wr.Created {
+				continue
+			}
+
+			// TODO pause channel if buffer gets too large
+			ws.buf = append(ws.buf, wr)
 		case <-w.ctx.Done():
 			return
 		case <-ws.initReq.ctx.Done():

@@ -639,6 +678,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
 			continue
 		}
 		ws.donec = make(chan struct{})
+		w.wg.Add(1)
 		go w.serveSubstream(ws, w.resumec)
 	}

@@ -659,6 +699,10 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
 		go func(ws *watcherStream) {
 			defer wg.Done()
 			if ws.closing {
+				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+					close(ws.outc)
+					ws.outc = nil
+				}
 				return
 			}
 			select {

@@ -719,6 +763,7 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
 		Key:            []byte(wr.key),
 		RangeEnd:       []byte(wr.end),
 		ProgressNotify: wr.progressNotify,
+		Filters:        wr.filters,
 		PrevKv:         wr.prevKV,
 	}
 	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
@@ -26,7 +26,7 @@ import (
 )
 
 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor")
 )
 
 const (

@@ -52,7 +52,8 @@ var (
 
 var (
 	// Number of retries discovery will attempt before giving up and erroring out.
 	nRetries = uint(math.MaxUint32)
+	maxExpoentialRetries = uint(8)
 )
 
 // JoinCluster will connect to the discovery service at the given url, and

@@ -243,7 +244,7 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
 		}
 		return nil, 0, 0, err
 	}
-	nodes := make([]*client.Node, 0)
+	var nodes []*client.Node
 	// append non-config keys to nodes
 	for _, n := range resp.Node.Nodes {
 		if !(path.Base(n.Key) == path.Base(configKey)) {

@@ -268,9 +269,14 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
 
 func (d *discovery) logAndBackoffForRetry(step string) {
 	d.retries++
-	retryTime := time.Second * (0x1 << d.retries)
-	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTime)
-	d.clock.Sleep(retryTime)
+	// logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward.
+	retries := d.retries
+	if retries > maxExpoentialRetries {
+		retries = maxExpoentialRetries
+	}
+	retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
+	d.clock.Sleep(retryTimeInSecond)
 }
 
 func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
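A standalone sketch of the capped backoff the discovery retry now uses: the sleep doubles per retry up to 2^8 seconds and stays constant afterward. The identifier maxExpoentialRetries, spelling included, is taken from the diff above.

package main

import (
	"fmt"
	"time"
)

const maxExpoentialRetries = uint(8)

func backoff(retries uint) time.Duration {
	if retries > maxExpoentialRetries {
		retries = maxExpoentialRetries // cap: constant 256s from here on
	}
	return time.Duration(0x1<<retries) * time.Second
}

func main() {
	for i := uint(1); i <= 10; i++ {
		fmt.Printf("retry %2d -> sleep %s\n", i, backoff(i))
	}
}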
@@ -17,6 +17,7 @@ package discovery
 import (
 	"fmt"
 	"net"
+	"net/url"
 	"strings"
 
 	"github.com/coreos/etcd/pkg/types"

@@ -33,9 +34,8 @@ var (
 // Also doesn't do any lookups for the token (though it could)
 // Also sees each entry as a separate instance.
 func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) {
-	stringParts := make([]string, 0)
 	tempName := int(0)
-	tcpAPUrls := make([]string, 0)
+	tcp2ap := make(map[string]url.URL)
 
 	// First, resolve the apurls
 	for _, url := range apurls {

@@ -44,10 +44,11 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
 			plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host)
 			return "", "", err
 		}
-		tcpAPUrls = append(tcpAPUrls, tcpAddr.String())
+		tcp2ap[tcpAddr.String()] = url
 	}
 
-	updateNodeMap := func(service, prefix string) error {
+	stringParts := []string{}
+	updateNodeMap := func(service, scheme string) error {
 		_, addrs, err := lookupSRV(service, "tcp", dns)
 		if err != nil {
 			return err

@@ -61,35 +62,37 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
 				continue
 			}
 			n := ""
-			for _, url := range tcpAPUrls {
-				if url == tcpAddr.String() {
-					n = name
-				}
-			}
+			url, ok := tcp2ap[tcpAddr.String()]
+			if ok {
+				n = name
+			}
 			if n == "" {
 				n = fmt.Sprintf("%d", tempName)
-				tempName += 1
+				tempName++
 			}
 			// SRV records have a trailing dot but URL shouldn't.
 			shortHost := strings.TrimSuffix(srv.Target, ".")
 			urlHost := net.JoinHostPort(shortHost, port)
-			stringParts = append(stringParts, fmt.Sprintf("%s=%s%s", n, prefix, urlHost))
-			plog.Noticef("got bootstrap from DNS for %s at %s%s", service, prefix, urlHost)
+			stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
+			plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost)
+			if ok && url.Scheme != scheme {
+				plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
+			}
 		}
 		return nil
 	}
 
 	failCount := 0
-	err := updateNodeMap("etcd-server-ssl", "https://")
+	err := updateNodeMap("etcd-server-ssl", "https")
 	srvErr := make([]string, 2)
 	if err != nil {
 		srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err)
-		failCount += 1
+		failCount++
 	}
-	err = updateNodeMap("etcd-server", "http://")
+	err = updateNodeMap("etcd-server", "http")
 	if err != nil {
 		srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err)
-		failCount += 1
+		failCount++
 	}
 	if failCount == 2 {
 		plog.Warningf(srvErr[0])
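A standalone sketch of the SRV bootstrap flow above using the standard resolver; the etcd-server-ssl and etcd-server service names are the ones the diff queries, and the domain argument is whatever the caller configured.

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// srvPeers resolves one SRV service to scheme://host:port peer URLs.
func srvPeers(domain, service, scheme string) ([]string, error) {
	_, addrs, err := net.LookupSRV(service, "tcp", domain)
	if err != nil {
		return nil, err
	}
	peers := make([]string, 0, len(addrs))
	for _, srv := range addrs {
		// SRV targets carry a trailing dot that a URL host must not.
		host := strings.TrimSuffix(srv.Target, ".")
		port := strconv.Itoa(int(srv.Port))
		peers = append(peers, fmt.Sprintf("%s://%s", scheme, net.JoinHostPort(host, port)))
	}
	return peers, nil
}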
@@ -30,15 +30,14 @@ const (
 )
 
 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "api")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
 
 	// capabilityMaps is a static map of version to capability map.
 	// the base capabilities is the set of capability 2.0 supports.
 	capabilityMaps = map[string]map[Capability]bool{
-		"2.1.0": {AuthCapability: true},
-		"2.2.0": {AuthCapability: true},
 		"2.3.0": {AuthCapability: true},
 		"3.0.0": {AuthCapability: true, V3rpcCapability: true},
+		"3.1.0": {AuthCapability: true, V3rpcCapability: true},
 	}
 
 	enableMapMu sync.RWMutex
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
-	"net/http/pprof"
 	"net/url"
 	"path"
 	"strconv"

@@ -57,7 +56,6 @@ const (
 	healthPath  = "/health"
 	versionPath = "/version"
 	configPath  = "/config"
-	pprofPrefix = "/debug/pprof"
 )
 
 // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.

@@ -113,23 +111,6 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
 	mux.Handle(deprecatedMachinesPrefix, dmh)
 	handleAuth(mux, sech)
 
-	if server.IsPprofEnabled() {
-		plog.Infof("pprof is enabled under %s", pprofPrefix)
-
-		mux.HandleFunc(pprofPrefix, pprof.Index)
-		mux.HandleFunc(pprofPrefix+"/profile", pprof.Profile)
-		mux.HandleFunc(pprofPrefix+"/symbol", pprof.Symbol)
-		mux.HandleFunc(pprofPrefix+"/cmdline", pprof.Cmdline)
-		// TODO: currently, we don't create an entry for pprof.Trace,
-		// because go 1.4 doesn't provide it. After support of go 1.4 is dropped,
-		// we should add the entry.
-
-		mux.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
-		mux.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
-		mux.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
-		mux.Handle(pprofPrefix+"/block", pprof.Handler("block"))
-	}
-
 	return requestLogger(mux)
 }

@@ -153,7 +134,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	defer cancel()
 	clock := clockwork.NewRealClock()
 	startTime := clock.Now()
-	rr, err := parseKeyRequest(r, clock)
+	rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
 	if err != nil {
 		writeKeyError(w, err)
 		return

@@ -175,7 +156,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	switch {
 	case resp.Event != nil:
-		if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
+		if err := writeKeyEvent(w, resp.Event, noValueOnSuccess, h.timer); err != nil {
 			// Should never be reached
 			plog.Errorf("error writing event (%v)", err)
 		}

@@ -365,32 +346,23 @@ func serveVars(w http.ResponseWriter, r *http.Request) {
 	fmt.Fprintf(w, "\n}\n")
 }
 
-// TODO: change etcdserver to raft interface when we have it.
-// add test for healthHandler when we have the interface ready.
 func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		if !allowMethod(w, r.Method, "GET") {
 			return
 		}
 
 		if uint64(server.Leader()) == raft.None {
 			http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
 			return
 		}
-		// wait for raft's progress
-		index := server.Index()
-		for i := 0; i < 3; i++ {
-			time.Sleep(250 * time.Millisecond)
-			if server.Index() > index {
-				w.WriteHeader(http.StatusOK)
-				w.Write([]byte(`{"health": "true"}`))
-				return
-			}
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		defer cancel()
+		if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
+			http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
+			return
 		}
-
-		http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
-		return
+		w.WriteHeader(http.StatusOK)
+		w.Write([]byte(`{"health": "true"}`))
 	}
 }
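The rewritten handler makes /health reflect an actual quorum read (a QGET through server.Do) rather than polling the raft index. A sketch of probing it, assuming etcd serves clients on 127.0.0.1:2379 over plain HTTP:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:2379/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// 200 with {"health": "true"} when the quorum read succeeds;
	// 503 with {"health": "false"} otherwise.
	fmt.Printf("%d %s\n", resp.StatusCode, body)
}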
@ -449,19 +421,20 @@ func logHandleFunc(w http.ResponseWriter, r *http.Request) {
|
||||||
// parseKeyRequest converts a received http.Request on keysPrefix to
|
// parseKeyRequest converts a received http.Request on keysPrefix to
|
||||||
// a server Request, performing validation of supplied fields as appropriate.
|
// a server Request, performing validation of supplied fields as appropriate.
|
||||||
// If any validation fails, an empty Request and non-nil error is returned.
|
// If any validation fails, an empty Request and non-nil error is returned.
|
||||||
func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, error) {
|
func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
|
||||||
|
noValueOnSuccess := false
|
||||||
emptyReq := etcdserverpb.Request{}
|
emptyReq := etcdserverpb.Request{}
|
||||||
|
|
||||||
err := r.ParseForm()
|
err := r.ParseForm()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeInvalidForm,
|
etcdErr.EcodeInvalidForm,
|
||||||
err.Error(),
|
err.Error(),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !strings.HasPrefix(r.URL.Path, keysPrefix) {
|
if !strings.HasPrefix(r.URL.Path, keysPrefix) {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeInvalidForm,
|
etcdErr.EcodeInvalidForm,
|
||||||
"incorrect key prefix",
|
"incorrect key prefix",
|
||||||
)
|
)
|
||||||
|
|
@ -470,13 +443,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
|
||||||
|
|
||||||
var pIdx, wIdx uint64
|
var pIdx, wIdx uint64
|
||||||
if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
|
if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
|
||||||
return emptyReq, etcdErr.NewRequestError(
|
return emptyReq, false, etcdErr.NewRequestError(
|
||||||
etcdErr.EcodeIndexNaN,
|
etcdErr.EcodeIndexNaN,
|
||||||
 			`invalid value for "prevIndex"`,
 		)
 	}
 	if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeIndexNaN,
 			`invalid value for "waitIndex"`,
 		)

@@ -484,45 +457,45 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

 	var rec, sort, wait, dir, quorum, stream bool
 	if rec, err = getBool(r.Form, "recursive"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeInvalidField,
 			`invalid value for "recursive"`,
 		)
 	}
 	if sort, err = getBool(r.Form, "sorted"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeInvalidField,
 			`invalid value for "sorted"`,
 		)
 	}
 	if wait, err = getBool(r.Form, "wait"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeInvalidField,
 			`invalid value for "wait"`,
 		)
 	}
 	// TODO(jonboulle): define what parameters dir is/isn't compatible with?
 	if dir, err = getBool(r.Form, "dir"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeInvalidField,
 			`invalid value for "dir"`,
 		)
 	}
 	if quorum, err = getBool(r.Form, "quorum"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeInvalidField,
 			`invalid value for "quorum"`,
 		)
 	}
 	if stream, err = getBool(r.Form, "stream"); err != nil {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeInvalidField,
 			`invalid value for "stream"`,
 		)
 	}

 	if wait && r.Method != "GET" {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodeInvalidField,
 			`"wait" can only be used with GET requests`,
 		)

@@ -530,19 +503,26 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque

 	pV := r.FormValue("prevValue")
 	if _, ok := r.Form["prevValue"]; ok && pV == "" {
-		return emptyReq, etcdErr.NewRequestError(
+		return emptyReq, false, etcdErr.NewRequestError(
 			etcdErr.EcodePrevValueRequired,
 			`"prevValue" cannot be empty`,
 		)
 	}

+	if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
+		return emptyReq, false, etcdErr.NewRequestError(
+			etcdErr.EcodeInvalidField,
+			`invalid value for "noValueOnSuccess"`,
+		)
+	}
+
 	// TTL is nullable, so leave it null if not specified
 	// or an empty string
 	var ttl *uint64
 	if len(r.FormValue("ttl")) > 0 {
 		i, err := getUint64(r.Form, "ttl")
 		if err != nil {
-			return emptyReq, etcdErr.NewRequestError(
+			return emptyReq, false, etcdErr.NewRequestError(
 				etcdErr.EcodeTTLNaN,
 				`invalid value for "ttl"`,
 			)
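Note: every early return in parseKeyRequest now carries the extra noValueOnSuccess boolean, which is parsed with the same getBool helper as the other form flags. The helper's body is not part of this diff; a minimal sketch of what such a form-flag parser could look like (hypothetical reconstruction, details may differ from the real helper):

    package main

    import (
    	"fmt"
    	"net/url"
    	"strconv"
    )

    // getBool is a guess at the helper's shape: an absent key reads as
    // false, a single value is parsed with strconv, anything else errors.
    func getBool(form url.Values, key string) (bool, error) {
    	vals, ok := form[key]
    	if !ok {
    		return false, nil
    	}
    	if len(vals) != 1 {
    		return false, fmt.Errorf("multiple values for %q", key)
    	}
    	return strconv.ParseBool(vals[0])
    }

    func main() {
    	form := url.Values{"noValueOnSuccess": {"true"}}
    	fmt.Println(getBool(form, "noValueOnSuccess")) // true <nil>
    }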
@@ -555,7 +535,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
 	if _, ok := r.Form["prevExist"]; ok {
 		bv, err := getBool(r.Form, "prevExist")
 		if err != nil {
-			return emptyReq, etcdErr.NewRequestError(
+			return emptyReq, false, etcdErr.NewRequestError(
 				etcdErr.EcodeInvalidField,
 				"invalid value for prevExist",
 			)

@@ -568,7 +548,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
 	if _, ok := r.Form["refresh"]; ok {
 		bv, err := getBool(r.Form, "refresh")
 		if err != nil {
-			return emptyReq, etcdErr.NewRequestError(
+			return emptyReq, false, etcdErr.NewRequestError(
 				etcdErr.EcodeInvalidField,
 				"invalid value for refresh",
 			)

@@ -577,13 +557,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
 	if refresh != nil && *refresh {
 		val := r.FormValue("value")
 		if _, ok := r.Form["value"]; ok && val != "" {
-			return emptyReq, etcdErr.NewRequestError(
+			return emptyReq, false, etcdErr.NewRequestError(
 				etcdErr.EcodeRefreshValue,
 				`A value was provided on a refresh`,
 			)
 		}
 		if ttl == nil {
-			return emptyReq, etcdErr.NewRequestError(
+			return emptyReq, false, etcdErr.NewRequestError(
 				etcdErr.EcodeRefreshTTLRequired,
 				`No TTL value set`,
 			)

@@ -621,13 +601,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
 		rr.Expiration = clock.Now().Add(expr).UnixNano()
 	}

-	return rr, nil
+	return rr, noValueOnSuccess, nil
 }

 // writeKeyEvent trims the prefix of key path in a single Event under
 // StoreKeysPrefix, serializes it and writes the resulting JSON to the given
 // ResponseWriter, along with the appropriate headers.
-func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTimer) error {
+func writeKeyEvent(w http.ResponseWriter, ev *store.Event, noValueOnSuccess bool, rt etcdserver.RaftTimer) error {
 	if ev == nil {
 		return errors.New("cannot write empty Event!")
 	}

@@ -641,6 +621,12 @@ func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTim
 	}

 	ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
+	if noValueOnSuccess &&
+		(ev.Action == store.Set || ev.Action == store.CompareAndSwap ||
+			ev.Action == store.Create || ev.Action == store.Update) {
+		ev.Node = nil
+		ev.PrevNode = nil
+	}
 	return json.NewEncoder(w).Encode(ev)
 }

@@ -747,6 +733,10 @@ func trimErrorPrefix(err error, prefix string) error {

 func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
 	ctype := r.Header.Get("Content-Type")
+	semicolonPosition := strings.Index(ctype, ";")
+	if semicolonPosition != -1 {
+		ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
+	}
 	if ctype != "application/json" {
 		writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
 		return false
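Note: taken together, the noValueOnSuccess changes mean a successful v2 write can be acknowledged without echoing the node payloads back. A self-contained sketch of the serialization difference (simplified stand-in types, not the etcd store structs):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type node struct {
    	Key   string `json:"key"`
    	Value string `json:"value"`
    }

    type event struct {
    	Action   string `json:"action"`
    	Node     *node  `json:"node,omitempty"`
    	PrevNode *node  `json:"prevNode,omitempty"`
    }

    func main() {
    	ev := event{Action: "set", Node: &node{Key: "/foo", Value: "bar"}}
    	full, _ := json.Marshal(ev)
    	ev.Node, ev.PrevNode = nil, nil // what the noValueOnSuccess branch does
    	trimmed, _ := json.Marshal(ev)
    	fmt.Println(string(full))    // {"action":"set","node":{"key":"/foo","value":"bar"}}
    	fmt.Println(string(trimmed)) // {"action":"set"}
    }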
@@ -35,7 +35,7 @@ const (
 )

 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v2http")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http")
 	mlog = logutil.NewMergeLogger(plog)
 )

@@ -60,7 +60,7 @@ func writeError(w http.ResponseWriter, r *http.Request, err error) {
 		}
 	default:
 		switch err {
-		case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers:
+		case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
 			mlog.MergeError(err)
 		default:
 			mlog.MergeErrorf("got unexpected response error (%v)", err)

@@ -22,7 +22,7 @@ import (
 )

 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api/v2http", "httptypes")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes")
 )

 type HTTPError struct {

@@ -26,14 +26,14 @@ import (

 const (
 	peerMembersPrefix = "/members"
-	leasesPrefix = "/leases"
 )

 // NewPeerHandler generates an http.Handler to handle etcd peer requests.
 func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler {
 	var lh http.Handler
-	if l := s.Lessor(); l != nil {
-		lh = leasehttp.NewHandler(l)
+	l := s.Lessor()
+	if l != nil {
+		lh = leasehttp.NewHandler(l, func() <-chan struct{} { return s.ApplyWait() })
 	}
 	return newPeerHandler(s.Cluster(), s.RaftHandler(), lh)
 }

@@ -49,7 +49,8 @@ func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler
 	mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
 	mux.Handle(peerMembersPrefix, mh)
 	if leaseHandler != nil {
-		mux.Handle(leasesPrefix, leaseHandler)
+		mux.Handle(leasehttp.LeasePrefix, leaseHandler)
+		mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
 	}
 	mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
 	return mux
@@ -19,14 +19,13 @@ import (

 	"github.com/coreos/etcd/etcdserver"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"github.com/coreos/pkg/capnslog"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 )

 func init() {
-	grpclog.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "v3rpc/grpc"))
+	grpclog.SetLogger(plog)
 }

 func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {

@@ -15,7 +15,6 @@
 package v3rpc

 import (
-	"strings"
 	"sync"
 	"time"

@@ -25,6 +24,7 @@ import (
 	"github.com/coreos/etcd/pkg/types"
 	"github.com/coreos/etcd/raft"

+	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"

@@ -53,7 +53,8 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
 			}
 		}
 	}
-		return metricsUnaryInterceptor(ctx, req, info, handler)
+
+		return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
 	}
 }

@@ -88,44 +89,11 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
 		}
 	}
-		return metricsStreamInterceptor(srv, ss, info, handler)
+
+		return prometheus.StreamServerInterceptor(srv, ss, info, handler)
 	}
 }

-func metricsUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
-	service, method := splitMethodName(info.FullMethod)
-	receivedCounter.WithLabelValues(service, method).Inc()
-
-	start := time.Now()
-	resp, err = handler(ctx, req)
-	if err != nil {
-		failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
-	}
-	handlingDuration.WithLabelValues(service, method).Observe(time.Since(start).Seconds())
-
-	return resp, err
-}
-
-func metricsStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-	service, method := splitMethodName(info.FullMethod)
-	receivedCounter.WithLabelValues(service, method).Inc()
-
-	err := handler(srv, ss)
-	if err != nil {
-		failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
-	}
-
-	return err
-}
-
-func splitMethodName(fullMethodName string) (string, string) {
-	fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
-	if i := strings.Index(fullMethodName, "/"); i >= 0 {
-		return fullMethodName[:i], fullMethodName[i+1:]
-	}
-	return "unknown", "unknown"
-}
-
 type serverStreamWithCtx struct {
 	grpc.ServerStream
 	ctx context.Context
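Note: the hand-rolled gRPC request counters and the splitMethodName helper above are replaced wholesale by go-grpc-prometheus. Outside of etcd's interceptor-chaining logic, the basic wiring of that library looks roughly like this (a minimal sketch, not the actual etcd server construction):

    package main

    import (
    	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
    	"google.golang.org/grpc"
    )

    // newServer installs the library's interceptors, which export the
    // per-service/per-method metrics the deleted code used to compute.
    func newServer() *grpc.Server {
    	return grpc.NewServer(
    		grpc.UnaryInterceptor(prometheus.UnaryServerInterceptor),
    		grpc.StreamInterceptor(prometheus.StreamServerInterceptor),
    	)
    }

    func main() { _ = newServer() }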
@@ -26,7 +26,7 @@ import (
 )

 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v3rpc")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")

 	// Max operations per txn list. For example, Txn.Success can have at most 128 operations,
 	// and Txn.Failure can have at most 128 operations.

@@ -56,7 +56,7 @@ func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResp
 		plog.Panic("unexpected nil resp.Header")
 	}
 	s.hdr.fill(resp.Header)
-	return resp, err
+	return resp, nil
 }

 func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {

@@ -73,7 +73,7 @@ func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse,
 		plog.Panic("unexpected nil resp.Header")
 	}
 	s.hdr.fill(resp.Header)
-	return resp, err
+	return resp, nil
 }

 func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {

@@ -90,7 +90,7 @@ func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*
 		plog.Panic("unexpected nil resp.Header")
 	}
 	s.hdr.fill(resp.Header)
-	return resp, err
+	return resp, nil
 }

 func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {

@@ -107,7 +107,7 @@ func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse,
 		plog.Panic("unexpected nil resp.Header")
 	}
 	s.hdr.fill(resp.Header)
-	return resp, err
+	return resp, nil
 }

 func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
@@ -18,7 +18,6 @@ import (
 	"io"

 	"github.com/coreos/etcd/etcdserver"
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/lease"
 	"golang.org/x/net/context"

@@ -35,20 +34,27 @@ func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {

 func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
 	resp, err := ls.le.LeaseGrant(ctx, cr)
-	if err == lease.ErrLeaseExists {
-		return nil, rpctypes.ErrGRPCLeaseExist
-	}
 	if err != nil {
-		return nil, err
+		return nil, togRPCError(err)
 	}
 	ls.hdr.fill(resp.Header)
-	return resp, err
+	return resp, nil
 }

 func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
 	resp, err := ls.le.LeaseRevoke(ctx, rr)
 	if err != nil {
-		return nil, rpctypes.ErrGRPCLeaseNotFound
+		return nil, togRPCError(err)
+	}
+	ls.hdr.fill(resp.Header)
+	return resp, nil
+}
+
+func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+	resp, err := ls.le.LeaseTimeToLive(ctx, rr)
+	if err != nil {
+		return nil, togRPCError(err)
 	}
 	ls.hdr.fill(resp.Header)
 	return resp, nil
@@ -18,6 +18,7 @@ import (
 	"crypto/sha256"
 	"io"

+	"github.com/coreos/etcd/auth"
 	"github.com/coreos/etcd/etcdserver"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc"

@@ -45,6 +46,10 @@ type RaftStatusGetter interface {
 	Leader() types.ID
 }

+type AuthGetter interface {
+	AuthStore() auth.AuthStore
+}
+
 type maintenanceServer struct {
 	rg RaftStatusGetter
 	kg KVGetter

@@ -54,7 +59,8 @@ type maintenanceServer struct {
 }

 func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
-	return &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
+	srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
+	return &authMaintenanceServer{srv, s}
 }

 func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {

@@ -139,3 +145,49 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (
 	ms.hdr.fill(resp.Header)
 	return resp, nil
 }
+
+type authMaintenanceServer struct {
+	*maintenanceServer
+	ag AuthGetter
+}
+
+func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
+	authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx)
+	if err != nil {
+		return err
+	}
+
+	return ams.ag.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+	if err := ams.isAuthenticated(ctx); err != nil {
+		return nil, err
+	}
+
+	return ams.maintenanceServer.Defragment(ctx, sr)
+}
+
+func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+	if err := ams.isAuthenticated(srv.Context()); err != nil {
+		return err
+	}
+
+	return ams.maintenanceServer.Snapshot(sr, srv)
+}
+
+func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+	if err := ams.isAuthenticated(ctx); err != nil {
+		return nil, err
+	}
+
+	return ams.maintenanceServer.Hash(ctx, r)
+}
+
+func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+	if err := ams.isAuthenticated(ctx); err != nil {
+		return nil, err
+	}
+
+	return ams.maintenanceServer.Status(ctx, ar)
+}
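Note: authMaintenanceServer is a standard Go embedding pattern: embed the unchecked implementation, override only the RPCs that need a guard, and delegate explicitly. A stripped-down, self-contained sketch of the same pattern (names invented for illustration):

    package main

    import (
    	"errors"
    	"fmt"
    )

    type server struct{}

    func (s *server) Status() string { return "ok" }

    // authServer gates the methods it overrides; anything it does not
    // override falls through to the embedded *server unchanged.
    type authServer struct {
    	*server
    	admin bool
    }

    func (a *authServer) Status() (string, error) {
    	if !a.admin {
    		return "", errors.New("permission denied")
    	}
    	return a.server.Status(), nil
    }

    func main() {
    	s := &authServer{&server{}, false}
    	_, err := s.Status()
    	fmt.Println(err) // permission denied
    }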
@@ -24,8 +24,6 @@ import (
 	"github.com/coreos/etcd/etcdserver/membership"
 	"github.com/coreos/etcd/pkg/types"
 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
 )

 type ClusterServer struct {

@@ -50,14 +48,8 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)

 	now := time.Now()
 	m := membership.NewMember("", urls, "", &now)
-	err = cs.server.AddMember(ctx, *m)
-	switch {
-	case err == membership.ErrIDExists:
-		return nil, rpctypes.ErrGRPCMemberExist
-	case err == membership.ErrPeerURLexists:
-		return nil, rpctypes.ErrGRPCPeerURLExist
-	case err != nil:
-		return nil, grpc.Errorf(codes.Internal, err.Error())
+	if err = cs.server.AddMember(ctx, *m); err != nil {
+		return nil, togRPCError(err)
 	}

 	return &pb.MemberAddResponse{

@@ -67,16 +59,9 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
 }

 func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
-	err := cs.server.RemoveMember(ctx, r.ID)
-	switch {
-	case err == membership.ErrIDRemoved:
-		fallthrough
-	case err == membership.ErrIDNotFound:
-		return nil, rpctypes.ErrGRPCMemberNotFound
-	case err != nil:
-		return nil, grpc.Errorf(codes.Internal, err.Error())
+	if err := cs.server.RemoveMember(ctx, r.ID); err != nil {
+		return nil, togRPCError(err)
 	}

 	return &pb.MemberRemoveResponse{Header: cs.header()}, nil
 }

@@ -85,16 +70,9 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
 		ID: types.ID(r.ID),
 		RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
 	}
-	err := cs.server.UpdateMember(ctx, m)
-	switch {
-	case err == membership.ErrPeerURLexists:
-		return nil, rpctypes.ErrGRPCPeerURLExist
-	case err == membership.ErrIDNotFound:
-		return nil, rpctypes.ErrGRPCMemberNotFound
-	case err != nil:
-		return nil, grpc.Errorf(codes.Internal, err.Error())
+	if err := cs.server.UpdateMember(ctx, m); err != nil {
+		return nil, togRPCError(err)
 	}

 	return &pb.MemberUpdateResponse{Header: cs.header()}, nil
 }
@@ -17,31 +17,6 @@ package v3rpc
 import "github.com/prometheus/client_golang/prometheus"

 var (
-	receivedCounter = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Namespace: "etcd",
-			Subsystem: "grpc",
-			Name: "requests_total",
-			Help: "Counter of received requests.",
-		}, []string{"grpc_service", "grpc_method"})
-
-	failedCounter = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Namespace: "etcd",
-			Subsystem: "grpc",
-			Name: "requests_failed_total",
-			Help: "Counter of failed requests.",
-		}, []string{"grpc_service", "grpc_method", "grpc_code"})
-
-	handlingDuration = prometheus.NewHistogramVec(
-		prometheus.HistogramOpts{
-			Namespace: "etcd",
-			Subsystem: "grpc",
-			Name: "unary_requests_duration_seconds",
-			Help: "Bucketed histogram of processing time (s) of handled unary (non-stream) requests.",
-			Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
-		}, []string{"grpc_service", "grpc_method"})
-
 	sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: "etcd",
 		Subsystem: "network",

@@ -58,10 +33,6 @@ var (
 )

 func init() {
-	prometheus.MustRegister(receivedCounter)
-	prometheus.MustRegister(failedCounter)
-	prometheus.MustRegister(handlingDuration)
-
 	prometheus.MustRegister(sentBytes)
 	prometheus.MustRegister(receivedBytes)
 }
@@ -31,23 +31,28 @@ var (
 	ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
 	ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")

 	ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
 	ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
-	ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
-	ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
+	ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
+	ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
+	ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")

 	ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
+	ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")

 	ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
 	ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
 	ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
+	ErrGRPCUserEmpty = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty")
 	ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
 	ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
 	ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
 	ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
-	ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied")
+	ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
 	ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
 	ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
+	ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
+	ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")

 	ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
 	ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")

@@ -68,16 +73,19 @@ var (
 	grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
 	grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,

 	grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
 	grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
-	grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
-	grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
+	grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+	grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
+	grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,

 	grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
+	grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,

 	grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
 	grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
 	grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
+	grpc.ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
 	grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
 	grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
 	grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,

@@ -85,6 +93,8 @@ var (
 	grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
 	grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
 	grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+	grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
+	grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,

 	grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
 	grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,

@@ -106,16 +116,19 @@ var (
 	ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
 	ErrLeaseExist = Error(ErrGRPCLeaseExist)

 	ErrMemberExist = Error(ErrGRPCMemberExist)
 	ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
-	ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
-	ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
+	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
+	ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
+	ErrMemberNotFound = Error(ErrGRPCMemberNotFound)

 	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
+	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)

 	ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
 	ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
 	ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
+	ErrUserEmpty = Error(ErrGRPCUserEmpty)
 	ErrUserNotFound = Error(ErrGRPCUserNotFound)
 	ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
 	ErrRoleNotFound = Error(ErrGRPCRoleNotFound)

@@ -123,6 +136,8 @@ var (
 	ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
 	ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
 	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
+	ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
+	ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)

 	ErrNoLeader = Error(ErrGRPCNoLeader)
 	ErrNotCapable = Error(ErrGRPCNotCapable)
@@ -18,6 +18,7 @@ import (
 	"github.com/coreos/etcd/auth"
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	"github.com/coreos/etcd/etcdserver/membership"
 	"github.com/coreos/etcd/lease"
 	"github.com/coreos/etcd/mvcc"
 	"google.golang.org/grpc"

@@ -26,17 +27,29 @@ import (

 func togRPCError(err error) error {
 	switch err {
+	case membership.ErrIDRemoved:
+		return rpctypes.ErrGRPCMemberNotFound
+	case membership.ErrIDNotFound:
+		return rpctypes.ErrGRPCMemberNotFound
+	case membership.ErrIDExists:
+		return rpctypes.ErrGRPCMemberExist
+	case membership.ErrPeerURLexists:
+		return rpctypes.ErrGRPCPeerURLExist
+	case etcdserver.ErrNotEnoughStartedMembers:
+		return rpctypes.ErrMemberNotEnoughStarted
+
 	case mvcc.ErrCompacted:
 		return rpctypes.ErrGRPCCompacted
 	case mvcc.ErrFutureRev:
 		return rpctypes.ErrGRPCFutureRev
 	case lease.ErrLeaseNotFound:
 		return rpctypes.ErrGRPCLeaseNotFound
-	// TODO: handle error from raft and timeout
 	case etcdserver.ErrRequestTooLarge:
 		return rpctypes.ErrGRPCRequestTooLarge
 	case etcdserver.ErrNoSpace:
 		return rpctypes.ErrGRPCNoSpace
+	case etcdserver.ErrTooManyRequests:
+		return rpctypes.ErrTooManyRequests

 	case etcdserver.ErrNoLeader:
 		return rpctypes.ErrGRPCNoLeader

@@ -48,6 +61,13 @@ func togRPCError(err error) error {
 		return rpctypes.ErrGRPCTimeoutDueToLeaderFail
 	case etcdserver.ErrTimeoutDueToConnectionLost:
 		return rpctypes.ErrGRPCTimeoutDueToConnectionLost
+	case etcdserver.ErrUnhealthy:
+		return rpctypes.ErrGRPCUnhealthy
+
+	case lease.ErrLeaseNotFound:
+		return rpctypes.ErrGRPCLeaseNotFound
+	case lease.ErrLeaseExists:
+		return rpctypes.ErrGRPCLeaseExist

 	case auth.ErrRootUserNotExist:
 		return rpctypes.ErrGRPCRootUserNotExist

@@ -55,6 +75,8 @@ func togRPCError(err error) error {
 		return rpctypes.ErrGRPCRootRoleNotExist
 	case auth.ErrUserAlreadyExist:
 		return rpctypes.ErrGRPCUserAlreadyExist
+	case auth.ErrUserEmpty:
+		return rpctypes.ErrGRPCUserEmpty
 	case auth.ErrUserNotFound:
 		return rpctypes.ErrGRPCUserNotFound
 	case auth.ErrRoleAlreadyExist:

@@ -69,7 +91,11 @@ func togRPCError(err error) error {
 		return rpctypes.ErrGRPCRoleNotGranted
 	case auth.ErrPermissionNotGranted:
 		return rpctypes.ErrGRPCPermissionNotGranted
+	case auth.ErrAuthNotEnabled:
+		return rpctypes.ErrGRPCAuthNotEnabled
+	case auth.ErrInvalidAuthToken:
+		return rpctypes.ErrGRPCInvalidAuthToken
 	default:
-		return grpc.Errorf(codes.Internal, err.Error())
+		return grpc.Errorf(codes.Unknown, err.Error())
 	}
 }
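Note: the per-handler error switches deleted elsewhere in this commit all funnel into this single togRPCError mapping. A self-contained sketch of the technique, trimmed to one domain error (grpc.Errorf as used in the diff; the error value is invented):

    package main

    import (
    	"errors"
    	"fmt"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/codes"
    )

    var errLeaseNotFound = errors.New("lease: not found")

    // toGRPC mirrors the shape of togRPCError: known domain errors map to
    // stable gRPC errors; everything else falls through as codes.Unknown.
    func toGRPC(err error) error {
    	switch err {
    	case errLeaseNotFound:
    		return grpc.Errorf(codes.NotFound, "requested lease not found")
    	default:
    		return grpc.Errorf(codes.Unknown, err.Error())
    	}
    }

    func main() {
    	fmt.Println(toGRPC(errLeaseNotFound))
    	fmt.Println(toGRPC(errors.New("boom")))
    }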
@@ -92,6 +92,7 @@ type serverWatchStream struct {
 	mu sync.Mutex
 	// progress tracks the watchID that stream might need to send
 	// progress to.
+	// TODO: combine progress and prevKV into a single struct?
 	progress map[mvcc.WatchID]bool
 	prevKV map[mvcc.WatchID]bool

@@ -130,10 +131,14 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
 	// but when stream.Context().Done() is closed, the stream's recv
 	// may continue to block since it uses a different context, leading to
 	// deadlock when calling sws.close().
-	go func() { errc <- sws.recvLoop() }()
+	go func() {
+		if rerr := sws.recvLoop(); rerr != nil {
+			errc <- rerr
+		}
+	}()
 	select {
 	case err = <-errc:
+		close(sws.ctrlStream)
 	case <-stream.Context().Done():
 		err = stream.Context().Err()
 		// the only server-side cancellation is noleader for now.

@@ -146,7 +151,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
 }

 func (sws *serverWatchStream) recvLoop() error {
-	defer close(sws.ctrlStream)
 	for {
 		req, err := sws.gRPCStream.Recv()
 		if err == io.EOF {

@@ -171,12 +175,14 @@ func (sws *serverWatchStream) recvLoop() error {
 			// support >= key queries
 			creq.RangeEnd = []byte{}
 		}
+		filters := FiltersFromRequest(creq)
+
 		wsrev := sws.watchStream.Rev()
 		rev := creq.StartRevision
 		if rev == 0 {
 			rev = wsrev + 1
 		}
-		id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
+		id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
 		if id != -1 {
 			sws.mu.Lock()
 			if creq.ProgressNotify {

@@ -353,3 +359,25 @@ func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
 		RaftTerm: sws.raftTimer.Term(),
 	}
 }
+
+func filterNoDelete(e mvccpb.Event) bool {
+	return e.Type == mvccpb.DELETE
+}
+
+func filterNoPut(e mvccpb.Event) bool {
+	return e.Type == mvccpb.PUT
+}
+
+func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
+	filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
+	for _, ft := range creq.Filters {
+		switch ft {
+		case pb.WatchCreateRequest_NOPUT:
+			filters = append(filters, filterNoPut)
+		case pb.WatchCreateRequest_NODELETE:
+			filters = append(filters, filterNoDelete)
+		default:
+		}
+	}
+	return filters
+}
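Note: FiltersFromRequest is the server half of watch event filtering; the filter list arrives in WatchCreateRequest.Filters. From a client these are normally set through watch options — a hedged sketch using the v3 client (the clientv3 option name is an assumption, not part of this diff):

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/coreos/etcd/clientv3"
    )

    func main() {
    	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// WithFilterPut asks the server to drop PUT events, i.e. the
    	// filterNoPut func registered above.
    	for resp := range cli.Watch(context.Background(), "foo", clientv3.WithFilterPut()) {
    		for _, ev := range resp.Events {
    			fmt.Println(ev.Type, string(ev.Kv.Key)) // only DELETEs arrive
    		}
    	}
    }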
@@ -35,7 +35,7 @@ const (
 	// to apply functions instead of a valid txn ID.
 	noTxn = -1

-	warnApplyDuration = 10 * time.Millisecond
+	warnApplyDuration = 100 * time.Millisecond
 )

 type applyResult struct {

@@ -258,7 +258,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
 	}

 	limit := r.Limit
-	if r.SortOrder != pb.RangeRequest_NONE {
+	if r.SortOrder != pb.RangeRequest_NONE ||
+		r.MinModRevision != 0 || r.MaxModRevision != 0 ||
+		r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
 		// fetch everything; sort and truncate afterwards
 		limit = 0
 	}

@@ -285,7 +287,31 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
 		}
 	}

-	if r.SortOrder != pb.RangeRequest_NONE {
+	if r.MaxModRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
+		pruneKVs(rr, f)
+	}
+	if r.MinModRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
+		pruneKVs(rr, f)
+	}
+	if r.MaxCreateRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
+		pruneKVs(rr, f)
+	}
+	if r.MinCreateRevision != 0 {
+		f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
+		pruneKVs(rr, f)
+	}
+
+	sortOrder := r.SortOrder
+	if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
+		// Since current mvcc.Range implementation returns results
+		// sorted by keys in lexiographically ascending order,
+		// sort ASCEND by default only when target is not 'KEY'
+		sortOrder = pb.RangeRequest_ASCEND
+	}
+	if sortOrder != pb.RangeRequest_NONE {
 		var sorter sort.Interface
 		switch {
 		case r.SortTarget == pb.RangeRequest_KEY:

@@ -300,9 +326,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
 			sorter = &kvSortByValue{&kvSort{rr.KVs}}
 		}
 		switch {
-		case r.SortOrder == pb.RangeRequest_ASCEND:
+		case sortOrder == pb.RangeRequest_ASCEND:
 			sort.Sort(sorter)
-		case r.SortOrder == pb.RangeRequest_DESCEND:
+		case sortOrder == pb.RangeRequest_DESCEND:
 			sort.Sort(sort.Reverse(sorter))
 		}
 	}

@@ -345,34 +371,23 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
 		return nil, err
 	}

-	revision := a.s.KV().Rev()
-
 	// When executing the operations of txn, we need to hold the txn lock.
 	// So the reader will not see any intermediate results.
 	txnID := a.s.KV().TxnBegin()
-	defer func() {
-		err := a.s.KV().TxnEnd(txnID)
-		if err != nil {
-			panic(fmt.Sprint("unexpected error when closing txn", txnID))
-		}
-	}()

 	resps := make([]*pb.ResponseOp, len(reqs))
-	changedKV := false
 	for i := range reqs {
-		if reqs[i].GetRequestRange() == nil {
-			changedKV = true
-		}
 		resps[i] = a.applyUnion(txnID, reqs[i])
 	}

-	if changedKV {
-		revision += 1
+	err := a.s.KV().TxnEnd(txnID)
+	if err != nil {
+		panic(fmt.Sprint("unexpected error when closing txn", txnID))
 	}

 	txnResp := &pb.TxnResponse{}
 	txnResp.Header = &pb.ResponseHeader{}
-	txnResp.Header.Revision = revision
+	txnResp.Header.Revision = a.s.KV().Rev()
 	txnResp.Responses = resps
 	txnResp.Succeeded = ok
 	return txnResp, nil

@@ -436,6 +451,10 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
 		if result != 0 {
 			return rev, false
 		}
+	case pb.Compare_NOT_EQUAL:
+		if result == 0 {
+			return rev, false
+		}
 	case pb.Compare_GREATER:
 		if result != 1 {
 			return rev, false

@@ -454,7 +473,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
 	if tv.RequestRange != nil {
 		resp, err := a.Range(txnID, tv.RequestRange)
 		if err != nil {
-			panic("unexpected error during txn")
+			plog.Panicf("unexpected error during txn: %v", err)
 		}
 		return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
 	}

@@ -462,7 +481,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
 	if tv.RequestPut != nil {
 		resp, err := a.Put(txnID, tv.RequestPut)
 		if err != nil {
-			panic("unexpected error during txn")
+			plog.Panicf("unexpected error during txn: %v", err)
 		}
 		return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
 	}

@@ -470,7 +489,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
 	if tv.RequestDeleteRange != nil {
 		resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
 		if err != nil {
-			panic("unexpected error during txn")
+			plog.Panicf("unexpected error during txn: %v", err)
 		}
 		return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
 	}

@@ -500,7 +519,7 @@ func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
 	resp := &pb.LeaseGrantResponse{}
 	if err == nil {
 		resp.ID = int64(l.ID)
-		resp.TTL = l.TTL
+		resp.TTL = l.TTL()
 		resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()}
 	}

@@ -784,3 +803,36 @@ func compareInt64(a, b int64) int {
 func isGteRange(rangeEnd []byte) bool {
 	return len(rangeEnd) == 1 && rangeEnd[0] == 0
 }
+
+func noSideEffect(r *pb.InternalRaftRequest) bool {
+	return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
+}
+
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
+	f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+		j := 0
+		for i := 0; i < len(ops); i++ {
+			if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
+				continue
+			}
+			ops[j] = ops[i]
+			j++
+		}
+
+		return ops[:j]
+	}
+
+	txn.Success = f(txn.Success)
+	txn.Failure = f(txn.Failure)
+}
+
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+	j := 0
+	for i := range rr.KVs {
+		rr.KVs[j] = rr.KVs[i]
+		if !isPrunable(&rr.KVs[i]) {
+			j++
+		}
+	}
+	rr.KVs = rr.KVs[:j]
+}
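Note: pruneKVs gives Range its new Min/MaxModRevision and Min/MaxCreateRevision semantics: results are fetched without a limit and then pruned in place against each bound. A hedged client-side sketch of driving those fields (the clientv3 option name is an assumption, not part of this diff):

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/coreos/etcd/clientv3"
    )

    func main() {
    	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// Keep only keys under "job/" whose last modification happened at
    	// revision 100 or later; older ones are pruned server-side.
    	resp, err := cli.Get(context.Background(), "job/",
    		clientv3.WithPrefix(), clientv3.WithMinModRev(100))
    	if err != nil {
    		panic(err)
    	}
    	for _, kv := range resp.Kvs {
    		fmt.Println(string(kv.Key), kv.ModRevision)
    	}
    }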
@@ -27,8 +27,9 @@ type authApplierV3 struct {

	// mu serializes Apply so that user isn't corrupted and so that
	// serialized requests don't leak data from TOCTOU errors
	mu sync.Mutex

-	user string
+	authInfo auth.AuthInfo
}

func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
@@ -41,45 +42,57 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
	if r.Header != nil {
		// backward-compatible with pre-3.0 releases when internalRaftRequest
		// does not have header field
-		aa.user = r.Header.Username
+		aa.authInfo.Username = r.Header.Username
+		aa.authInfo.Revision = r.Header.AuthRevision
	}
-	if needAdminPermission(r) && !aa.as.IsAdminPermitted(aa.user) {
-		aa.user = ""
-		return &applyResult{err: auth.ErrPermissionDenied}
+	if needAdminPermission(r) {
+		if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+			aa.authInfo.Username = ""
+			aa.authInfo.Revision = 0
+			return &applyResult{err: err}
+		}
	}
	ret := aa.applierV3.Apply(r)
-	aa.user = ""
+	aa.authInfo.Username = ""
+	aa.authInfo.Revision = 0
	return ret
}

func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
-	if !aa.as.IsPutPermitted(aa.user, r.Key) {
-		return nil, auth.ErrPermissionDenied
+	if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
+		return nil, err
	}
-	if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, nil) {
-		return nil, auth.ErrPermissionDenied
+	if r.PrevKv {
+		err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
+		if err != nil {
+			return nil, err
+		}
	}
	return aa.applierV3.Put(txnID, r)
}

func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
-	if !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
-		return nil, auth.ErrPermissionDenied
+	if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+		return nil, err
	}
	return aa.applierV3.Range(txnID, r)
}

func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
-	if !aa.as.IsDeleteRangePermitted(aa.user, r.Key, r.RangeEnd) {
-		return nil, auth.ErrPermissionDenied
+	if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+		return nil, err
	}
-	if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
-		return nil, auth.ErrPermissionDenied
+	if r.PrevKv {
+		err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
+		if err != nil {
+			return nil, err
+		}
	}

	return aa.applierV3.DeleteRange(txnID, r)
}

-func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
	for _, requ := range reqs {
		switch tv := requ.Request.(type) {
		case *pb.RequestOp_RequestRange:
@@ -87,8 +100,8 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
				continue
			}

-			if !aa.as.IsRangePermitted(aa.user, tv.RequestRange.Key, tv.RequestRange.RangeEnd) {
-				return false
+			if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
+				return err
			}

		case *pb.RequestOp_RequestPut:
@@ -96,8 +109,8 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
				continue
			}

-			if !aa.as.IsPutPermitted(aa.user, tv.RequestPut.Key) {
-				return false
+			if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
+				return err
			}

		case *pb.RequestOp_RequestDeleteRange:
@@ -105,29 +118,42 @@ func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
				continue
			}

-			if tv.RequestDeleteRange.PrevKv && !aa.as.IsRangePermitted(aa.user, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) {
-				return false
+			if tv.RequestDeleteRange.PrevKv {
+				err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+				if err != nil {
+					return err
+				}
+			}
+
+			err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+			if err != nil {
+				return err
			}
		}
	}

-	return true
+	return nil
+}
+
+func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+	for _, c := range rt.Compare {
+		if err := as.IsRangePermitted(ai, c.Key, nil); err != nil {
+			return err
+		}
+	}
+	if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+		return err
+	}
+	if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
+		return err
+	}
+	return nil
}

func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
-	for _, c := range rt.Compare {
-		if !aa.as.IsRangePermitted(aa.user, c.Key, nil) {
-			return nil, auth.ErrPermissionDenied
-		}
+	if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+		return nil, err
	}
-
-	if !aa.checkTxnReqsPermission(rt.Success) {
-		return nil, auth.ErrPermissionDenied
-	}
-	if !aa.checkTxnReqsPermission(rt.Failure) {
-		return nil, auth.ErrPermissionDenied
-	}
-
	return aa.applierV3.Txn(rt)
}
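The hunk above replaces the bare aa.user string with an auth.AuthInfo value carrying both the username and the auth-store revision it was validated against, and the Is*Permitted checks now return errors instead of bools so callers can forward the precise failure. A rough sketch of that shape under stated assumptions (the AuthInfo fields come from the diff; the permission function is a hypothetical stand-in for the auth.AuthStore methods):

package main

import (
	"errors"
	"fmt"
)

// AuthInfo mirrors the pair of fields threaded through the applier above.
type AuthInfo struct {
	Username string
	Revision uint64
}

var errPermissionDenied = errors.New("auth: permission denied")

// isPutPermitted is a toy stand-in: returning an error (rather than a bool)
// lets the caller distinguish "denied" from other failures, such as a stale
// auth revision, instead of collapsing everything to one generic error.
func isPutPermitted(ai *AuthInfo, key []byte) error {
	if ai.Username != "root" {
		return fmt.Errorf("user %q: %w", ai.Username, errPermissionDenied)
	}
	return nil
}

func main() {
	ai := &AuthInfo{Username: "alice", Revision: 7}
	if err := isPutPermitted(ai, []byte("foo")); err != nil {
		fmt.Println("put rejected:", err)
	}
}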
@@ -46,7 +46,7 @@ const (
)

var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "auth")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth")
)

var rootRole = Role{
@@ -167,7 +167,7 @@ func (_ passwordStore) HashPassword(password string) (string, error) {
}

func (s *store) AllUsers() ([]string, error) {
-	resp, err := s.requestResource("/users/", false)
+	resp, err := s.requestResource("/users/", false, false)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -185,33 +185,13 @@ func (s *store) AllUsers() ([]string, error) {
	return nodes, nil
}

-func (s *store) GetUser(name string) (User, error) {
-	resp, err := s.requestResource("/users/"+name, false)
-	if err != nil {
-		if e, ok := err.(*etcderr.Error); ok {
-			if e.ErrorCode == etcderr.EcodeKeyNotFound {
-				return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
-			}
-		}
-		return User{}, err
-	}
-	var u User
-	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
-	if err != nil {
-		return u, err
-	}
-	// Attach root role to root user.
-	if u.User == "root" {
-		u = attachRootRole(u)
-	}
-	return u, nil
-}
+func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }

// CreateOrUpdateUser should be only used for creating the new user or when you are not
// sure if it is a create or update. (When only password is passed in, we are not sure
// if it is a update or create)
func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
-	_, err = s.GetUser(user.User)
+	_, err = s.getUser(user.User, true)
	if err == nil {
		out, err = s.UpdateUser(user)
		return out, false, err
@@ -271,7 +251,7 @@ func (s *store) DeleteUser(name string) error {
}

func (s *store) UpdateUser(user User) (User, error) {
-	old, err := s.GetUser(user.User)
+	old, err := s.getUser(user.User, true)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -297,7 +277,7 @@ func (s *store) UpdateUser(user User) (User, error) {

func (s *store) AllRoles() ([]string, error) {
	nodes := []string{RootRoleName}
-	resp, err := s.requestResource("/roles/", false)
+	resp, err := s.requestResource("/roles/", false, false)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -314,23 +294,7 @@ func (s *store) AllRoles() ([]string, error) {
	return nodes, nil
}

-func (s *store) GetRole(name string) (Role, error) {
-	if name == RootRoleName {
-		return rootRole, nil
-	}
-	resp, err := s.requestResource("/roles/"+name, false)
-	if err != nil {
-		if e, ok := err.(*etcderr.Error); ok {
-			if e.ErrorCode == etcderr.EcodeKeyNotFound {
-				return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
-			}
-		}
-		return Role{}, err
-	}
-	var r Role
-	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
-	return r, err
-}
+func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }

func (s *store) CreateRole(role Role) error {
	if role.Role == RootRoleName {
@@ -372,7 +336,7 @@ func (s *store) UpdateRole(role Role) (Role, error) {
	if role.Role == RootRoleName {
		return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
	}
-	old, err := s.GetRole(role.Role)
+	old, err := s.getRole(role.Role, true)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -404,10 +368,10 @@ func (s *store) EnableAuth() error {
		return authErr(http.StatusConflict, "already enabled")
	}

-	if _, err := s.GetUser("root"); err != nil {
+	if _, err := s.getUser("root", true); err != nil {
		return authErr(http.StatusConflict, "No root user available, please create one")
	}
-	if _, err := s.GetRole(GuestRoleName); err != nil {
+	if _, err := s.getRole(GuestRoleName, true); err != nil {
		plog.Printf("no guest role access found, creating default")
		if err := s.CreateRole(guestRole); err != nil {
			plog.Errorf("error creating guest role. aborting auth enable.")
@@ -641,3 +605,43 @@ func attachRootRole(u User) User {
	}
	return u
}
+
+func (s *store) getUser(name string, quorum bool) (User, error) {
+	resp, err := s.requestResource("/users/"+name, false, quorum)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
+			}
+		}
+		return User{}, err
+	}
+	var u User
+	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
+	if err != nil {
+		return u, err
+	}
+	// Attach root role to root user.
+	if u.User == "root" {
+		u = attachRootRole(u)
+	}
+	return u, nil
+}
+
+func (s *store) getRole(name string, quorum bool) (Role, error) {
+	if name == RootRoleName {
+		return rootRole, nil
+	}
+	resp, err := s.requestResource("/roles/"+name, false, quorum)
+	if err != nil {
+		if e, ok := err.(*etcderr.Error); ok {
+			if e.ErrorCode == etcderr.EcodeKeyNotFound {
+				return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
+			}
+		}
+		return Role{}, err
+	}
+	var r Role
+	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
+	return r, err
+}
@@ -85,7 +85,7 @@ func (s *store) detectAuth() bool {
	if s.server == nil {
		return false
	}
-	value, err := s.requestResource("/enabled", false)
+	value, err := s.requestResource("/enabled", false, false)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -105,12 +105,16 @@ func (s *store) detectAuth() bool {
	return u
}

-func (s *store) requestResource(res string, dir bool) (etcdserver.Response, error) {
+func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) {
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	p := path.Join(StorePermsPrefix, res)
+	method := "GET"
+	if quorum {
+		method = "QGET"
+	}
	rr := etcdserverpb.Request{
-		Method: "GET",
+		Method: method,
		Path:   p,
		Dir:    dir,
	}
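The new quorum parameter switches the internal read between a plain "GET" and a quorum "QGET"; a quorum read is served through consensus, so it reflects every committed write at the cost of a raft round trip. A compact sketch of just that selection, with a simplified stand-in for etcdserverpb.Request (names illustrative only):

package main

import "fmt"

type request struct {
	Method string
	Path   string
	Dir    bool
}

// newResourceRequest mirrors the method selection added to requestResource above.
func newResourceRequest(p string, dir, quorum bool) request {
	method := "GET"
	if quorum {
		method = "QGET" // linearizable read through the raft log
	}
	return request{Method: method, Path: p, Dir: dir}
}

func main() {
	fmt.Printf("%+v\n", newResourceRequest("/users/root", false, true))
}

Callers that must not act on stale data, such as getUser during EnableAuth, pass quorum=true; the listing endpoints keep the cheaper local read.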
@@ -94,7 +94,16 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
			}
			continue
		}
-		return membership.NewClusterFromMembers("", id, membs), nil
+		// If the remote peers reported any members, build and return the
+		// raft cluster from them; an empty member list would yield an
+		// invalid, empty cluster, so report an error instead.
+		if len(membs) > 0 {
+			return membership.NewClusterFromMembers("", id, membs), nil
+		}
+
+		return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
	}
	return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
}
@@ -16,11 +16,13 @@ package etcdserver

import (
	"fmt"
-	"path"
+	"path/filepath"
	"sort"
	"strings"
	"time"

+	"golang.org/x/net/context"
+
	"github.com/coreos/etcd/pkg/netutil"
	"github.com/coreos/etcd/pkg/transport"
	"github.com/coreos/etcd/pkg/types"
@@ -55,8 +57,6 @@ type ServerConfig struct {

	StrictReconfigCheck bool

-	EnablePprof bool
-
	// ClientCertAuthEnabled is true when cert has been signed by the client CA.
	ClientCertAuthEnabled bool
}
@@ -64,7 +64,10 @@ type ServerConfig struct {
// VerifyBootstrap sanity-checks the initial config for bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
-	if err := c.verifyLocalMember(true); err != nil {
+	if err := c.hasLocalMember(); err != nil {
+		return err
+	}
+	if err := c.advertiseMatchesCluster(); err != nil {
		return err
	}
	if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -79,10 +82,9 @@ func (c *ServerConfig) VerifyBootstrap() error {
// VerifyJoinExisting sanity-checks the initial config for join existing cluster
// case and returns an error for things that should never happen.
func (c *ServerConfig) VerifyJoinExisting() error {
-	// no need for strict checking since the member have announced its
-	// peer urls to the cluster before starting and do not have to set
-	// it in the configuration again.
-	if err := c.verifyLocalMember(false); err != nil {
+	// The member has announced its peer urls to the cluster before starting; no need to
+	// set the configuration again.
+	if err := c.hasLocalMember(); err != nil {
		return err
	}
	if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -94,39 +96,38 @@ func (c *ServerConfig) VerifyJoinExisting() error {
	return nil
}

-// verifyLocalMember verifies the configured member is in configured
-// cluster. If strict is set, it also verifies the configured member
-// has the same peer urls as configured advertised peer urls.
-func (c *ServerConfig) verifyLocalMember(strict bool) error {
-	urls := c.InitialPeerURLsMap[c.Name]
-	// Make sure the cluster at least contains the local server.
-	if urls == nil {
+// hasLocalMember checks that the cluster at least contains the local server.
+func (c *ServerConfig) hasLocalMember() error {
+	if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
		return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
	}
-
-	// Advertised peer URLs must match those in the cluster peer list
-	apurls := c.PeerURLs.StringSlice()
-	sort.Strings(apurls)
-	urls.Sort()
-	if strict {
-		if !netutil.URLStringsEqual(apurls, urls.StringSlice()) {
-			umap := map[string]types.URLs{c.Name: c.PeerURLs}
-			return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
-		}
-	}
	return nil
}

-func (c *ServerConfig) MemberDir() string { return path.Join(c.DataDir, "member") }
+// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
+func (c *ServerConfig) advertiseMatchesCluster() error {
+	urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
+	urls.Sort()
+	sort.Strings(apurls)
+	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+	defer cancel()
+	if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) {
+		umap := map[string]types.URLs{c.Name: c.PeerURLs}
+		return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
+	}
+	return nil
+}
+
+func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }

func (c *ServerConfig) WALDir() string {
	if c.DedicatedWALDir != "" {
		return c.DedicatedWALDir
	}
-	return path.Join(c.MemberDir(), "wal")
+	return filepath.Join(c.MemberDir(), "wal")
}

-func (c *ServerConfig) SnapDir() string { return path.Join(c.MemberDir(), "snap") }
+func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }

func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
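One theme of this hunk is the switch from path.Join to filepath.Join for on-disk locations: path always uses forward slashes (URL-style paths), while filepath uses the host operating system's separator. A two-line demonstration:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	fmt.Println(path.Join("data", "member", "wal"))     // always data/member/wal
	fmt.Println(filepath.Join("data", "member", "wal")) // data\member\wal on Windows
}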
@@ -26,11 +26,13 @@ var (
	ErrTimeout                    = errors.New("etcdserver: request timed out")
	ErrTimeoutDueToLeaderFail     = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
	ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
+	ErrTimeoutLeaderTransfer      = errors.New("etcdserver: request timed out, leader transfer took too long")
	ErrNotEnoughStartedMembers    = errors.New("etcdserver: re-configuration failed due to not enough started members")
	ErrNoLeader                   = errors.New("etcdserver: no leader")
	ErrRequestTooLarge            = errors.New("etcdserver: request is too large")
	ErrNoSpace                    = errors.New("etcdserver: no space")
-	ErrInvalidAuthToken           = errors.New("etcdserver: invalid auth token")
+	ErrTooManyRequests            = errors.New("etcdserver: too many requests")
+	ErrUnhealthy                  = errors.New("etcdserver: unhealthy cluster")
)

type DiscoveryError struct {
@@ -45,6 +45,8 @@
	LeaseRevokeResponse
	LeaseKeepAliveRequest
	LeaseKeepAliveResponse
+	LeaseTimeToLiveRequest
+	LeaseTimeToLiveResponse
	Member
	MemberAddRequest
	MemberAddResponse
@@ -113,26 +115,28 @@ var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type Request struct {
-	ID uint64 `protobuf:"varint,1,opt,name=ID,json=iD" json:"ID"`
+	ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
-	Method string `protobuf:"bytes,2,opt,name=Method,json=method" json:"Method"`
+	Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
-	Path string `protobuf:"bytes,3,opt,name=Path,json=path" json:"Path"`
+	Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
-	Val string `protobuf:"bytes,4,opt,name=Val,json=val" json:"Val"`
+	Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
-	Dir bool `protobuf:"varint,5,opt,name=Dir,json=dir" json:"Dir"`
+	Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
-	PrevValue string `protobuf:"bytes,6,opt,name=PrevValue,json=prevValue" json:"PrevValue"`
+	PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
-	PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex,json=prevIndex" json:"PrevIndex"`
+	PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
-	PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist,json=prevExist" json:"PrevExist,omitempty"`
+	PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
-	Expiration int64 `protobuf:"varint,9,opt,name=Expiration,json=expiration" json:"Expiration"`
+	Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
-	Wait bool `protobuf:"varint,10,opt,name=Wait,json=wait" json:"Wait"`
+	Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
-	Since uint64 `protobuf:"varint,11,opt,name=Since,json=since" json:"Since"`
+	Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
-	Recursive bool `protobuf:"varint,12,opt,name=Recursive,json=recursive" json:"Recursive"`
+	Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
-	Sorted bool `protobuf:"varint,13,opt,name=Sorted,json=sorted" json:"Sorted"`
+	Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
-	Quorum bool `protobuf:"varint,14,opt,name=Quorum,json=quorum" json:"Quorum"`
+	Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
-	Time int64 `protobuf:"varint,15,opt,name=Time,json=time" json:"Time"`
+	Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
-	Stream bool `protobuf:"varint,16,opt,name=Stream,json=stream" json:"Stream"`
+	Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
-	Refresh *bool `protobuf:"varint,17,opt,name=Refresh,json=refresh" json:"Refresh,omitempty"`
+	Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

@@ -142,8 +146,8 @@ func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }

type Metadata struct {
-	NodeID uint64 `protobuf:"varint,1,opt,name=NodeID,json=nodeID" json:"NodeID"`
+	NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
-	ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID,json=clusterID" json:"ClusterID"`
+	ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
	XXX_unrecognized []byte `json:"-"`
}
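The struct-tag churn above is what downstream marshalers consult; the json=... aliases inside the protobuf tag appear to carry the alternate JSON field names, so dropping them changes only naming metadata, not the wire format. A small reflection sketch showing what consumers of these tags actually read (two fields copied from the struct above, trimmed):

package main

import (
	"fmt"
	"reflect"
)

type Request struct {
	ID     uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
	Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
}

func main() {
	f, _ := reflect.TypeOf(Request{}).FieldByName("Method")
	fmt.Println(f.Tag.Get("protobuf")) // bytes,2,opt,name=Method
	fmt.Println(f.Tag.Get("json"))     // Method
}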
@@ -156,182 +160,182 @@ func init() {
	proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
	proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
}
-func (m *Request) Marshal() (data []byte, err error) {
+func (m *Request) Marshal() (dAtA []byte, err error) {
	size := m.Size()
-	data = make([]byte, size)
+	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
-	return data[:n], nil
+	return dAtA[:n], nil
}

-func (m *Request) MarshalTo(data []byte) (int, error) {
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
-	data[i] = 0x8
+	dAtA[i] = 0x8
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(m.ID))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
-	data[i] = 0x12
+	dAtA[i] = 0x12
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(len(m.Method)))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
-	i += copy(data[i:], m.Method)
+	i += copy(dAtA[i:], m.Method)
-	data[i] = 0x1a
+	dAtA[i] = 0x1a
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(len(m.Path)))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
-	i += copy(data[i:], m.Path)
+	i += copy(dAtA[i:], m.Path)
-	data[i] = 0x22
+	dAtA[i] = 0x22
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(len(m.Val)))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
-	i += copy(data[i:], m.Val)
+	i += copy(dAtA[i:], m.Val)
-	data[i] = 0x28
+	dAtA[i] = 0x28
	i++
	if m.Dir {
-		data[i] = 1
+		dAtA[i] = 1
	} else {
-		data[i] = 0
+		dAtA[i] = 0
	}
	i++
-	data[i] = 0x32
+	dAtA[i] = 0x32
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(len(m.PrevValue)))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
-	i += copy(data[i:], m.PrevValue)
+	i += copy(dAtA[i:], m.PrevValue)
-	data[i] = 0x38
+	dAtA[i] = 0x38
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(m.PrevIndex))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
	if m.PrevExist != nil {
-		data[i] = 0x40
+		dAtA[i] = 0x40
		i++
		if *m.PrevExist {
-			data[i] = 1
+			dAtA[i] = 1
		} else {
-			data[i] = 0
+			dAtA[i] = 0
		}
		i++
	}
-	data[i] = 0x48
+	dAtA[i] = 0x48
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(m.Expiration))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
-	data[i] = 0x50
+	dAtA[i] = 0x50
	i++
	if m.Wait {
-		data[i] = 1
+		dAtA[i] = 1
	} else {
-		data[i] = 0
+		dAtA[i] = 0
	}
	i++
-	data[i] = 0x58
+	dAtA[i] = 0x58
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(m.Since))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
-	data[i] = 0x60
+	dAtA[i] = 0x60
	i++
	if m.Recursive {
-		data[i] = 1
+		dAtA[i] = 1
	} else {
-		data[i] = 0
+		dAtA[i] = 0
	}
	i++
-	data[i] = 0x68
+	dAtA[i] = 0x68
	i++
	if m.Sorted {
-		data[i] = 1
+		dAtA[i] = 1
	} else {
-		data[i] = 0
+		dAtA[i] = 0
	}
	i++
-	data[i] = 0x70
+	dAtA[i] = 0x70
	i++
	if m.Quorum {
-		data[i] = 1
+		dAtA[i] = 1
	} else {
-		data[i] = 0
+		dAtA[i] = 0
	}
	i++
-	data[i] = 0x78
+	dAtA[i] = 0x78
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(m.Time))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
-	data[i] = 0x80
+	dAtA[i] = 0x80
	i++
-	data[i] = 0x1
+	dAtA[i] = 0x1
	i++
	if m.Stream {
-		data[i] = 1
+		dAtA[i] = 1
	} else {
-		data[i] = 0
+		dAtA[i] = 0
	}
	i++
	if m.Refresh != nil {
-		data[i] = 0x88
+		dAtA[i] = 0x88
		i++
-		data[i] = 0x1
+		dAtA[i] = 0x1
		i++
		if *m.Refresh {
-			data[i] = 1
+			dAtA[i] = 1
		} else {
-			data[i] = 0
+			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
-		i += copy(data[i:], m.XXX_unrecognized)
+		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}

-func (m *Metadata) Marshal() (data []byte, err error) {
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
	size := m.Size()
-	data = make([]byte, size)
+	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
-	return data[:n], nil
+	return dAtA[:n], nil
}

-func (m *Metadata) MarshalTo(data []byte) (int, error) {
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
-	data[i] = 0x8
+	dAtA[i] = 0x8
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(m.NodeID))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
-	data[i] = 0x10
+	dAtA[i] = 0x10
	i++
-	i = encodeVarintEtcdserver(data, i, uint64(m.ClusterID))
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
	if m.XXX_unrecognized != nil {
-		i += copy(data[i:], m.XXX_unrecognized)
+		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}

-func encodeFixed64Etcdserver(data []byte, offset int, v uint64) int {
+func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
+	dAtA[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
+	dAtA[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+	dAtA[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
+	dAtA[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
+	dAtA[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
+	dAtA[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
-func encodeFixed32Etcdserver(data []byte, offset int, v uint32) int {
+func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
+	dAtA[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
+	dAtA[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
-func encodeVarintEtcdserver(data []byte, offset int, v uint64) int {
+func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
	return offset + 1
}
func (m *Request) Size() (n int) {
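encodeVarintEtcdserver implements protobuf's base-128 varint: seven payload bits per byte, with the high bit marking continuation. The same loop, extracted and run on a concrete value:

package main

import "fmt"

// encodeVarint is the loop from encodeVarintEtcdserver above, unchanged
// apart from being standalone.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02: 300 = 0b10_0101100, low 7 bits first
}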
@@ -392,8 +396,8 @@ func sovEtcdserver(x uint64) (n int) {
func sozEtcdserver(x uint64) (n int) {
	return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
-func (m *Request) Unmarshal(data []byte) error {
+func (m *Request) Unmarshal(dAtA []byte) error {
-	l := len(data)
+	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
@@ -405,7 +409,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -433,7 +437,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			m.ID |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -452,7 +456,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			stringLen |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -467,7 +471,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
-			m.Method = string(data[iNdEx:postIndex])
+			m.Method = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
@@ -481,7 +485,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			stringLen |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -496,7 +500,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
-			m.Path = string(data[iNdEx:postIndex])
+			m.Path = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
@@ -510,7 +514,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			stringLen |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -525,7 +529,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
-			m.Val = string(data[iNdEx:postIndex])
+			m.Val = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			if wireType != 0 {
@@ -539,7 +543,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -559,7 +563,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			stringLen |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -574,7 +578,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
-			m.PrevValue = string(data[iNdEx:postIndex])
+			m.PrevValue = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 7:
			if wireType != 0 {
@@ -588,7 +592,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			m.PrevIndex |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -607,7 +611,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -628,7 +632,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			m.Expiration |= (int64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -647,7 +651,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -667,7 +671,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			m.Since |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -686,7 +690,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -706,7 +710,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -726,7 +730,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -746,7 +750,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			m.Time |= (int64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -765,7 +769,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -785,7 +789,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			v |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -796,7 +800,7 @@ func (m *Request) Unmarshal(data []byte) error {
			m.Refresh = &b
		default:
			iNdEx = preIndex
-			skippy, err := skipEtcdserver(data[iNdEx:])
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
			if err != nil {
				return err
			}
@@ -806,7 +810,7 @@ func (m *Request) Unmarshal(data []byte) error {
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
@@ -816,8 +820,8 @@ func (m *Request) Unmarshal(data []byte) error {
	}
	return nil
}
-func (m *Metadata) Unmarshal(data []byte) error {
+func (m *Metadata) Unmarshal(dAtA []byte) error {
-	l := len(data)
+	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
@@ -829,7 +833,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -857,7 +861,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			m.NodeID |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -876,7 +880,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			m.ClusterID |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -885,7 +889,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
			}
		default:
			iNdEx = preIndex
-			skippy, err := skipEtcdserver(data[iNdEx:])
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
			if err != nil {
				return err
			}
@@ -895,7 +899,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
@@ -905,8 +909,8 @@ func (m *Metadata) Unmarshal(data []byte) error {
	}
	return nil
}
-func skipEtcdserver(data []byte) (n int, err error) {
+func skipEtcdserver(dAtA []byte) (n int, err error) {
-	l := len(data)
+	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
@@ -917,7 +921,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -935,7 +939,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
				return 0, io.ErrUnexpectedEOF
			}
			iNdEx++
-			if data[iNdEx-1] < 0x80 {
+			if dAtA[iNdEx-1] < 0x80 {
				break
			}
		}
@@ -952,7 +956,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			length |= (int(b) & 0x7F) << shift
			if b < 0x80 {
@@ -975,7 +979,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
			iNdEx++
			innerWire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
@@ -986,7 +990,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
			if innerWireType == 4 {
				break
			}
-			next, err := skipEtcdserver(data[start:])
+			next, err := skipEtcdserver(dAtA[start:])
			if err != nil {
				return 0, err
			}
@@ -1010,32 +1014,32 @@ var (
	ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
)

+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }
+
var fileDescriptorEtcdserver = []byte{
-	// 404 bytes of a gzipped FileDescriptorProto
+	// 380 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x41, 0x6e, 0x13, 0x31,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
-	0x14, 0x86, 0xe3, 0xc4, 0x99, 0x64, 0x4c, 0x81, 0x62, 0x45, 0xe8, 0xa9, 0x42, 0x43, 0x14, 0xb1,
+	0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
-	0xc8, 0x0a, 0xee, 0x50, 0xd2, 0x45, 0x24, 0x8a, 0x4a, 0x8a, 0xca, 0xda, 0x64, 0x1e, 0x8d, 0xa5,
+	0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
-	0xcc, 0x78, 0x6a, 0xbf, 0x19, 0x72, 0x03, 0xae, 0xc0, 0x91, 0xb2, 0xe4, 0x04, 0x08, 0xc2, 0x45,
+	0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
-	0x90, 0x3d, 0x9d, 0x60, 0xba, 0xb3, 0xbe, 0xff, 0xf7, 0xef, 0xdf, 0xf6, 0x13, 0xa7, 0x48, 0xeb,
+	0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
-	0xdc, 0xa1, 0x6d, 0xd0, 0xbe, 0xae, 0xac, 0x21, 0x23, 0x4f, 0xfe, 0x91, 0xea, 0xf3, 0xd9, 0xe4,
+	0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
-	0xd6, 0xdc, 0x9a, 0x20, 0xbc, 0xf1, 0xab, 0xd6, 0x33, 0xfb, 0xc6, 0xc5, 0x68, 0x85, 0x77, 0x35,
+	0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
-	0x3a, 0x92, 0x13, 0xd1, 0x5f, 0x2e, 0x80, 0x4d, 0xd9, 0x9c, 0x9f, 0xf3, 0xfd, 0xcf, 0x97, 0xbd,
+	0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
-	0x55, 0x5f, 0x2f, 0xe4, 0x0b, 0x91, 0x5c, 0x22, 0x6d, 0x4c, 0x0e, 0xfd, 0x29, 0x9b, 0xa7, 0xf7,
+	0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
-	0x4a, 0x52, 0x04, 0x26, 0x41, 0xf0, 0x2b, 0x45, 0x1b, 0x18, 0x44, 0x1a, 0xaf, 0x14, 0x6d, 0xe4,
+	0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
-	0x73, 0x31, 0xb8, 0x51, 0x5b, 0xe0, 0x91, 0x30, 0x68, 0xd4, 0xd6, 0xf3, 0x85, 0xb6, 0x30, 0x9c,
+	0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
-	0xb2, 0xf9, 0xb8, 0xe3, 0xb9, 0xb6, 0x72, 0x26, 0xd2, 0x2b, 0x8b, 0xcd, 0x8d, 0xda, 0xd6, 0x08,
+	0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
-	0x49, 0xb4, 0x2b, 0xad, 0x3a, 0xdc, 0x79, 0x96, 0x65, 0x8e, 0x3b, 0x18, 0x45, 0x45, 0x83, 0x27,
+	0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
-	0xe0, 0xce, 0x73, 0xb1, 0xd3, 0x8e, 0x60, 0x7c, 0x3c, 0x85, 0xb5, 0x9e, 0x80, 0xe5, 0x2b, 0x21,
+	0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
-	0x2e, 0x76, 0x95, 0xb6, 0x8a, 0xb4, 0x29, 0x21, 0x9d, 0xb2, 0xf9, 0xe0, 0x3e, 0x48, 0xe0, 0x91,
+	0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
-	0xfb, 0xbb, 0x7d, 0x52, 0x9a, 0x40, 0x44, 0x55, 0xf9, 0x57, 0xa5, 0x49, 0x9e, 0x89, 0xe1, 0xb5,
+	0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
-	0x2e, 0xd7, 0x08, 0x8f, 0xa2, 0x0e, 0x43, 0xe7, 0x91, 0x3f, 0x7f, 0x85, 0xeb, 0xda, 0x3a, 0xdd,
+	0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
-	0x20, 0x9c, 0x44, 0x5b, 0x53, 0xdb, 0x61, 0xff, 0xa6, 0xd7, 0xc6, 0x12, 0xe6, 0xf0, 0x38, 0x32,
+	0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
-	0x24, 0x2e, 0x30, 0xaf, 0x7e, 0xa8, 0x8d, 0xad, 0x0b, 0x78, 0x12, 0xab, 0x77, 0x81, 0xf9, 0x56,
+	0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
-	0x1f, 0x75, 0x81, 0xf0, 0x34, 0x6a, 0xcd, 0x49, 0x17, 0x6d, 0x2a, 0x59, 0x54, 0x05, 0x9c, 0xfe,
+	0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
-	0x97, 0x1a, 0x98, 0xcc, 0xfc, 0x47, 0x7f, 0xb1, 0xe8, 0x36, 0xf0, 0x2c, 0x7a, 0x95, 0x91, 0x6d,
+	0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
-	0xe1, 0xec, 0x9d, 0x18, 0x5f, 0x22, 0xa9, 0x5c, 0x91, 0xf2, 0x49, 0xef, 0x4d, 0x8e, 0x0f, 0xa6,
+	0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
-	0x21, 0x29, 0x03, 0xf3, 0x37, 0x7c, 0xbb, 0xad, 0x1d, 0xa1, 0x5d, 0x2e, 0xc2, 0x50, 0x1c, 0x7f,
+	0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
-	0x61, 0xdd, 0xe1, 0xf3, 0xc9, 0xfe, 0x77, 0xd6, 0xdb, 0x1f, 0x32, 0xf6, 0xe3, 0x90, 0xb1, 0x5f,
+	0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
-	0x87, 0x8c, 0x7d, 0xff, 0x93, 0xf5, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x80, 0x62, 0xfc, 0x40,
-	0xa4, 0x02, 0x00, 0x00,
}
File diff suppressed because it is too large
@@ -14,6 +14,8 @@ message RequestHeader {
   uint64 ID = 1;
   // username is a username that is associated with an auth token of gRPC connection
   string username = 2;
+  // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+  uint64 auth_revision = 3;
 }
 
 // An InternalRaftRequest is the union of all requests which can be

File diff suppressed because it is too large
@@ -222,6 +222,19 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh
 	return stream, metadata, nil
 }
 
+func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq LeaseTimeToLiveRequest
+	var metadata runtime.ServerMetadata
+
+	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+		return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
 func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
 	var protoReq MemberAddRequest
 	var metadata runtime.ServerMetadata
@@ -935,6 +948,34 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc
 
 	})
 
+	mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(ctx)
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, req)
+		if err != nil {
+			runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
+		}
+		resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
 	return nil
 }
 
@@ -944,6 +985,8 @@ var (
 	pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "revoke"}, ""))
 
 	pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "keepalive"}, ""))
+
+	pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "timetolive"}, ""))
 )
 
 var (
@@ -952,6 +995,8 @@ var (
 	forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
 
 	forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
+
+	forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
 )
 
 // RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but
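For reference, the route registered above can be exercised over plain HTTP once the gateway is running. A minimal sketch, assuming a local etcd gateway on 127.0.0.1:2379 and a made-up lease ID (both are illustrative, not taken from this diff); the JSON field names mirror the proto fields of LeaseTimeToLiveRequest:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Ask the gateway for the remaining TTL and attached keys of a lease.
	body := bytes.NewBufferString(`{"ID": 1234, "keys": true}`)
	resp, err := http.Post("http://127.0.0.1:2379/v3alpha/kv/lease/timetolive", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON-encoded LeaseTimeToLiveResponse
}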
@@ -104,8 +104,15 @@ service Lease {
     };
   }
 
+  // LeaseTimeToLive retrieves lease information.
+  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
+      option (google.api.http) = {
+        post: "/v3alpha/kv/lease/timetolive"
+        body: "*"
+    };
+  }
+
   // TODO(xiangli) List all existing Leases?
   // TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease?
 }
 
 service Cluster {
@@ -375,6 +382,22 @@ message RangeRequest {
 
   // count_only when set returns only the count of the keys in the range.
   bool count_only = 9;
+
+  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+  // lesser mod revisions will be filtered away.
+  int64 min_mod_revision = 10;
+
+  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+  // greater mod revisions will be filtered away.
+  int64 max_mod_revision = 11;
+
+  // min_create_revision is the lower bound for returned key create revisions; all keys with
+  // lesser create revisions will be filtered away.
+  int64 min_create_revision = 12;
+
+  // max_create_revision is the upper bound for returned key create revisions; all keys with
+  // greater create revisions will be filtered away.
+  int64 max_create_revision = 13;
 }
 
 message RangeResponse {
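The four new fields let the server filter range results by revision. A hypothetical use of the generated etcdserverpb types; the Go field names assume the usual gogo/protobuf CamelCase mapping rather than anything stated in this diff:

package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Range over the prefix "foo", keeping only keys whose last
	// modification falls inside revisions [100, 200].
	req := &pb.RangeRequest{
		Key:            []byte("foo"),
		RangeEnd:       []byte("fop"),
		MinModRevision: 100,
		MaxModRevision: 200,
	}
	fmt.Println(req.String())
}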
@@ -413,8 +436,11 @@ message DeleteRangeRequest {
   bytes key = 1;
   // range_end is the key following the last key to delete for the range [key, range_end).
   // If range_end is not given, the range is defined to contain only the key argument.
+  // If range_end is one bit larger than the given key, then the range is all
+  // keys with the prefix (the given key).
   // If range_end is '\0', the range is all keys greater than or equal to the key argument.
   bytes range_end = 2;
 
   // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
   // The previous key-value pairs will be returned in the delete response.
   bool prev_kv = 3;
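"One bit larger" means incrementing the last incrementable byte of the key, which turns a prefix into a half-open range. A self-contained sketch of that computation; the helper name is ours, not from this diff, though etcd clients implement the same idea:

package main

import "fmt"

// prefixRangeEnd computes the range_end that makes [prefix, range_end)
// cover exactly the keys with the given prefix: the last byte below 0xff
// is bumped by one and everything after it is dropped.
func prefixRangeEnd(prefix []byte) []byte {
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// Prefix of all 0xff bytes: fall back to '\0', i.e. "from key onward".
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("foo"))) // "fop"
}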
@@ -451,6 +477,7 @@ message Compare {
     EQUAL = 0;
     GREATER = 1;
     LESS = 2;
+    NOT_EQUAL = 3;
   }
   enum CompareTarget {
     VERSION = 0;
@@ -566,6 +593,8 @@ message WatchCreateRequest {
   // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
   // only the key argument is watched. If range_end is equal to '\0', all keys greater than
   // or equal to the key argument are watched.
+  // If the range_end is one bit larger than the given key,
+  // then all keys with the prefix (the given key) will be watched.
   bytes range_end = 2;
   // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
   int64 start_revision = 3;
@@ -574,6 +603,16 @@ message WatchCreateRequest {
   // wish to recover a disconnected watcher starting from a recent known revision.
   // The etcd server may decide how often it will send notifications based on current load.
   bool progress_notify = 4;
+
+  enum FilterType {
+    // filter out put event.
+    NOPUT = 0;
+    // filter out delete event.
+    NODELETE = 1;
+  }
+  // filters filter the events at server side before it sends back to the watcher.
+  repeated FilterType filters = 5;
+
   // If prev_kv is set, created watcher gets the previous KV before the event happens.
   // If the previous KV is already compacted, nothing will be returned.
   bool prev_kv = 6;
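Server-side filters let a watcher drop whole event classes before they hit the wire. A hedged sketch of building such a request; the generated enum and field names assume standard gogo/protobuf naming for nested enums, which this diff does not show:

package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Watch the key "foo" but let the server suppress put events,
	// so only delete events are streamed back to this watcher.
	req := &pb.WatchCreateRequest{
		Key:     []byte("foo"),
		Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT},
	}
	fmt.Println(req.String())
}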
@@ -647,6 +686,25 @@ message LeaseKeepAliveResponse {
   int64 TTL = 3;
 }
 
+message LeaseTimeToLiveRequest {
+  // ID is the lease ID for the lease.
+  int64 ID = 1;
+  // keys is true to query all the keys attached to this lease.
+  bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the keep alive request.
+  int64 ID = 2;
+  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+  int64 TTL = 3;
+  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+  int64 grantedTTL = 4;
+  // Keys is the list of keys attached to this lease.
+  repeated bytes keys = 5;
+}
+
 message Member {
   // ID is the member ID for this member.
   uint64 ID = 1;
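The same RPC is reachable directly over gRPC. A minimal sketch assuming a local endpoint and a placeholder lease ID; the client constructor and response field names follow the usual generated-code conventions, not text from this diff:

package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	lc := pb.NewLeaseClient(conn)
	// Query remaining TTL plus the keys attached to the lease.
	resp, err := lc.LeaseTimeToLive(context.TODO(), &pb.LeaseTimeToLiveRequest{ID: 1234, Keys: true})
	if err != nil {
		panic(err)
	}
	fmt.Printf("lease %d: ttl=%ds granted=%ds keys=%d\n", resp.ID, resp.TTL, resp.GrantedTTL, len(resp.Keys))
}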
@@ -24,6 +24,9 @@ import (
 	"sort"
 	"strings"
 	"sync"
+	"time"
+
+	"golang.org/x/net/context"
 
 	"github.com/coreos/etcd/mvcc/backend"
 	"github.com/coreos/etcd/pkg/netutil"
@@ -484,8 +487,10 @@ func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) erro
 	sort.Sort(MembersByPeerURLs(ems))
 	sort.Sort(MembersByPeerURLs(lms))
 
+	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+	defer cancel()
 	for i := range ems {
-		if !netutil.URLStringsEqual(ems[i].PeerURLs, lms[i].PeerURLs) {
+		if !netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs) {
 			return fmt.Errorf("unmatched member while checking PeerURLs")
 		}
 		lms[i].ID = ems[i].ID
@@ -27,7 +27,7 @@ import (
 )
 
 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "membership")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership")
 )
 
 // RaftAttributes represents the raft related attributes of an etcd member.
@@ -52,7 +52,7 @@ var (
 		Name:      "proposals_pending",
 		Help:      "The current number of pending proposals to commit.",
 	})
-	proposalsFailed = prometheus.NewGauge(prometheus.GaugeOpts{
+	proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: "etcd",
 		Subsystem: "server",
 		Name:      "proposals_failed_total",
@@ -98,18 +98,25 @@ type raftNode struct {
 	// last lead elected time
 	lt time.Time
 
+	// to check if msg receiver is removed from cluster
+	isIDRemoved func(id uint64) bool
+
 	raft.Node
 
+	// a chan to send/receive snapshot
+	msgSnapC chan raftpb.Message
+
 	// a chan to send out apply
 	applyc chan apply
 
-	// TODO: remove the etcdserver related logic from raftNode
-	// TODO: add a state machine interface to apply the commit entries
-	// and do snapshot/recover
-	s *EtcdServer
+	// a chan to send out readState
+	readStateC chan raft.ReadState
 
 	// utility
 	ticker <-chan time.Time
+	// contention detectors for raft heartbeat message
+	td        *contention.TimeoutDetector
+	heartbeat time.Duration // for logging
 	raftStorage *raft.MemoryStorage
 	storage     Storage
 	// transport specifies the transport to send and receive msgs to members.
@@ -118,32 +125,19 @@ type raftNode struct {
 	// If transport is nil, server will panic.
 	transport rafthttp.Transporter
 
-	td *contention.TimeoutDetector
-
 	stopped chan struct{}
 	done    chan struct{}
 }
 
 // start prepares and starts raftNode in a new goroutine. It is no longer safe
 // to modify the fields after it has been started.
-// TODO: Ideally raftNode should get rid of the passed in server structure.
-func (r *raftNode) start(s *EtcdServer) {
-	r.s = s
+func (r *raftNode) start(rh *raftReadyHandler) {
 	r.applyc = make(chan apply)
 	r.stopped = make(chan struct{})
 	r.done = make(chan struct{})
 
-	heartbeat := 200 * time.Millisecond
-	if s.Cfg != nil {
-		heartbeat = time.Duration(s.Cfg.TickMs) * time.Millisecond
-	}
-	// set up contention detectors for raft heartbeat message.
-	// expect to send a heartbeat within 2 heartbeat intervals.
-	r.td = contention.NewTimeoutDetector(2 * heartbeat)
+	internalTimeout := time.Second
 
 	go func() {
-		var syncC <-chan time.Time
-
 		defer r.onStop()
 		islead := false
@@ -167,32 +161,17 @@ func (r *raftNode) start(s *EtcdServer) {
 				}
 
 				atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
-				if rd.RaftState == raft.StateLeader {
-					islead = true
-					// TODO: raft should send server a notification through chan when
-					// it promotes or demotes instead of modifying server directly.
-					syncC = r.s.SyncTicker
-					if r.s.lessor != nil {
-						r.s.lessor.Promote(r.s.Cfg.electionTimeout())
-					}
-					// TODO: remove the nil checking
-					// current test utility does not provide the stats
-					if r.s.stats != nil {
-						r.s.stats.BecomeLeader()
-					}
-					if r.s.compactor != nil {
-						r.s.compactor.Resume()
-					}
-					r.td.Reset()
-				} else {
-					islead = false
-					if r.s.lessor != nil {
-						r.s.lessor.Demote()
-					}
-					if r.s.compactor != nil {
-						r.s.compactor.Pause()
-					}
-					syncC = nil
-				}
+				islead = rd.RaftState == raft.StateLeader
+				rh.updateLeadership()
+			}
+
+			if len(rd.ReadStates) != 0 {
+				select {
+				case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
+				case <-time.After(internalTimeout):
+					plog.Warningf("timed out sending read state")
+				case <-r.stopped:
+					return
+				}
 			}
 		}
@@ -203,6 +182,8 @@ func (r *raftNode) start(s *EtcdServer) {
 				raftDone: raftDone,
 			}
 
+			updateCommittedIndex(&ap, rh)
+
 			select {
 			case r.applyc <- ap:
 			case <-r.stopped:
@@ -214,7 +195,7 @@ func (r *raftNode) start(s *EtcdServer) {
 			// For more details, check raft thesis 10.2.1
 			if islead {
 				// gofail: var raftBeforeLeaderSend struct{}
-				r.s.send(rd.Messages)
+				r.sendMessages(rd.Messages)
 			}
 
 			// gofail: var raftBeforeSave struct{}
@@ -241,12 +222,10 @@ func (r *raftNode) start(s *EtcdServer) {
 
 			if !islead {
 				// gofail: var raftBeforeFollowerSend struct{}
-				r.s.send(rd.Messages)
+				r.sendMessages(rd.Messages)
 			}
 			raftDone <- struct{}{}
 			r.Advance()
-		case <-syncC:
-			r.s.sync(r.s.Cfg.ReqTimeout())
 		case <-r.stopped:
 			return
 		}
@@ -254,6 +233,59 @@ func (r *raftNode) start(s *EtcdServer) {
 	}()
 }
 
+func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
+	var ci uint64
+	if len(ap.entries) != 0 {
+		ci = ap.entries[len(ap.entries)-1].Index
+	}
+	if ap.snapshot.Metadata.Index > ci {
+		ci = ap.snapshot.Metadata.Index
+	}
+	if ci != 0 {
+		rh.updateCommittedIndex(ci)
+	}
+}
+
+func (r *raftNode) sendMessages(ms []raftpb.Message) {
+	sentAppResp := false
+	for i := len(ms) - 1; i >= 0; i-- {
+		if r.isIDRemoved(ms[i].To) {
+			ms[i].To = 0
+		}
+
+		if ms[i].Type == raftpb.MsgAppResp {
+			if sentAppResp {
+				ms[i].To = 0
+			} else {
+				sentAppResp = true
+			}
+		}
+
+		if ms[i].Type == raftpb.MsgSnap {
+			// There are two separate data stores: the store for v2, and the KV for v3.
+			// The msgSnap only contains the most recent snapshot of store without KV.
+			// So we need to redirect the msgSnap to etcd server main loop for merging in the
+			// current store snapshot and KV snapshot.
+			select {
+			case r.msgSnapC <- ms[i]:
+			default:
+				// drop msgSnap if the inflight chan is full.
+			}
+			ms[i].To = 0
+		}
+		if ms[i].Type == raftpb.MsgHeartbeat {
+			ok, exceed := r.td.Observe(ms[i].To)
+			if !ok {
+				// TODO: limit request rate.
+				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
+				plog.Warningf("server is likely overloaded")
+			}
+		}
+	}
+
+	r.transport.Send(ms)
+}
+
 func (r *raftNode) apply() chan apply {
 	return r.applyc
 }
@@ -23,6 +23,7 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"path/filepath"
 	"regexp"
 	"sync"
 	"sync/atomic"
@@ -40,6 +41,7 @@ import (
 	"github.com/coreos/etcd/lease"
 	"github.com/coreos/etcd/mvcc"
 	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/pkg/contention"
 	"github.com/coreos/etcd/pkg/fileutil"
 	"github.com/coreos/etcd/pkg/idutil"
 	"github.com/coreos/etcd/pkg/pbutil"
@@ -65,6 +67,10 @@ const (
 	StoreClusterPrefix = "/0"
 	StoreKeysPrefix    = "/1"
 
+	// HealthInterval is the minimum time the cluster should be healthy
+	// before accepting add member requests.
+	HealthInterval = 5 * time.Second
+
 	purgeFileInterval = 30 * time.Second
 	// monitorVersionInterval should be smaller than the timeout
 	// on the connection. Or we will not be able to reuse the connection
@@ -77,6 +83,9 @@ const (
 	maxInFlightMsgSnap = 16
 
 	releaseDelayAfterSnapshot = 30 * time.Second
+
+	// maxPendingRevokes is the maximum number of outstanding expired lease revocations.
+	maxPendingRevokes = 16
 )
 
 var (
@@ -158,6 +167,7 @@ type EtcdServer struct {
 	// inflightSnapshots holds count the number of snapshots currently inflight.
 	inflightSnapshots int64  // must use atomic operations to access; keep 64-bit aligned.
 	appliedIndex      uint64 // must use atomic operations to access; keep 64-bit aligned.
+	committedIndex    uint64 // must use atomic operations to access; keep 64-bit aligned.
 	// consistIndex used to hold the offset of current executing entry
 	// It is initialized to 0 before executing any entry.
 	consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
@@ -168,9 +178,23 @@ type EtcdServer struct {
 
 	snapCount uint64
 
 	w wait.Wait
-	stop chan struct{}
-	done chan struct{}
+
+	readMu sync.RWMutex
+	// read routine notifies etcd server that it waits for reading by sending an empty struct to
+	// readwaitC
+	readwaitc chan struct{}
+	// readNotifier is used to notify the read routine that it can process the request
+	// when there is no error
+	readNotifier *notifier
+
+	// stop signals the run goroutine should shutdown.
+	stop chan struct{}
+	// stopping is closed by run goroutine on shutdown.
+	stopping chan struct{}
+	// done is closed when all goroutines from start() complete.
+	done chan struct{}
 
 	errorc     chan error
 	id         types.ID
 	attributes membership.Attributes
@@ -181,7 +205,12 @@ type EtcdServer struct {
 
 	applyV2 ApplierV2
 
-	applyV3 applierV3
+	// applyV3 is the applier with auth and quotas
+	applyV3 applierV3
+	// applyV3Base is the core applier without auth or quotas
+	applyV3Base applierV3
+	applyWait   wait.WaitTime
 
 	kv     mvcc.ConsistentWatchableKV
 	lessor lease.Lessor
 	bemu   sync.Mutex
@@ -204,8 +233,8 @@ type EtcdServer struct {
 	// to detect the cluster version immediately.
 	forceVersionC chan struct{}
 
-	msgSnapC chan raftpb.Message
+	// wgMu blocks concurrent waitgroup mutation while server stopping
+	wgMu sync.RWMutex
 	// wg is used to wait for the go routines that depends on the server state
 	// to exit when stopping the server.
 	wg sync.WaitGroup
@@ -228,15 +257,6 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 		return nil, fmt.Errorf("cannot access data directory: %v", terr)
 	}
 
-	// Run the migrations.
-	dataVer, err := version.DetectDataDir(cfg.DataDir)
-	if err != nil {
-		return nil, err
-	}
-	if err = upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
-		return nil, err
-	}
-
 	haveWAL := wal.Exist(cfg.WALDir())
 
 	if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
@@ -244,9 +264,24 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 	}
 	ss := snap.New(cfg.SnapDir())
 
-	bepath := path.Join(cfg.SnapDir(), databaseFilename)
+	bepath := filepath.Join(cfg.SnapDir(), databaseFilename)
 	beExist := fileutil.Exist(bepath)
-	be := backend.NewDefaultBackend(bepath)
+
+	var be backend.Backend
+	beOpened := make(chan struct{})
+	go func() {
+		be = backend.NewDefaultBackend(bepath)
+		beOpened <- struct{}{}
+	}()
+
+	select {
+	case <-beOpened:
+	case <-time.After(time.Second):
+		plog.Warningf("another etcd process is running with the same data dir and holding the file lock.")
+		plog.Warningf("waiting for it to exit before starting...")
+		<-beOpened
+	}
+
 	defer func() {
 		if err != nil {
 			be.Close()
@@ -372,6 +407,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 	sstats.Initialize()
 	lstats := stats.NewLeaderStats(id.String())
 
+	heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
 	srv = &EtcdServer{
 		readych: make(chan struct{}),
 		Cfg:     cfg,
@@ -379,10 +415,17 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 		errorc:  make(chan error, 1),
 		store:   st,
 		r: raftNode{
+			isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
 			Node:        n,
-			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
+			ticker:      time.Tick(heartbeat),
+			// set up contention detectors for raft heartbeat message.
+			// expect to send a heartbeat within 2 heartbeat intervals.
+			td:          contention.NewTimeoutDetector(2 * heartbeat),
+			heartbeat:   heartbeat,
 			raftStorage: s,
 			storage:     NewStorage(w, ss),
+			msgSnapC:    make(chan raftpb.Message, maxInFlightMsgSnap),
+			readStateC:  make(chan raft.ReadState, 1),
 		},
 		id:         id,
 		attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
@@ -393,15 +436,16 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 		peerRt:        prt,
 		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
 		forceVersionC: make(chan struct{}),
-		msgSnapC:      make(chan raftpb.Message, maxInFlightMsgSnap),
 	}
 
 	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
 
 	srv.be = be
-	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * time.Duration(cfg.TickMs) * time.Millisecond
-	srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
+	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
+
+	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+	srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
 	srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
 	if beExist {
 		kvindex := srv.kv.ConsistentIndex()
@@ -416,12 +460,16 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 	}
 	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
 
-	srv.authStore = auth.NewAuthStore(srv.be)
+	srv.authStore = auth.NewAuthStore(srv.be,
+		func(index uint64) <-chan struct{} {
+			return srv.applyWait.Wait(index)
+		})
 	if h := cfg.AutoCompactionRetention; h != 0 {
 		srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
 		srv.compactor.Run()
 	}
 
+	srv.applyV3Base = &applierV3backend{srv}
 	if err = srv.restoreAlarms(); err != nil {
 		return nil, err
 	}
@@ -463,10 +511,11 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 // It also starts a goroutine to publish its server information.
 func (s *EtcdServer) Start() {
 	s.start()
-	go s.publish(s.Cfg.ReqTimeout())
-	go s.purgeFile()
-	go monitorFileDescriptor(s.done)
-	go s.monitorVersions()
+	s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
+	s.goAttach(s.purgeFile)
+	s.goAttach(func() { monitorFileDescriptor(s.stopping) })
+	s.goAttach(s.monitorVersions)
+	s.goAttach(s.linearizableReadLoop)
 }
 
 // start prepares and starts server in a new goroutine. It is no longer safe to
|
||||||
s.snapCount = DefaultSnapCount
|
s.snapCount = DefaultSnapCount
|
||||||
}
|
}
|
||||||
s.w = wait.New()
|
s.w = wait.New()
|
||||||
|
s.applyWait = wait.NewTimeList()
|
||||||
s.done = make(chan struct{})
|
s.done = make(chan struct{})
|
||||||
s.stop = make(chan struct{})
|
s.stop = make(chan struct{})
|
||||||
|
s.stopping = make(chan struct{})
|
||||||
|
s.readwaitc = make(chan struct{}, 1)
|
||||||
|
s.readNotifier = newNotifier()
|
||||||
if s.ClusterVersion() != nil {
|
if s.ClusterVersion() != nil {
|
||||||
plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
|
plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
|
||||||
} else {
|
} else {
|
||||||
|
|
@@ -503,7 +556,7 @@ func (s *EtcdServer) purgeFile() {
 			plog.Fatalf("failed to purge wal file %v", e)
 		case e := <-serrc:
 			plog.Fatalf("failed to purge snap file %v", e)
-		case <-s.done:
+		case <-s.stopping:
 			return
 		}
 	}
@@ -516,6 +569,8 @@ func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler()
 
 func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor }
 
+func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
+
 func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
 	if s.cluster.IsIDRemoved(types.ID(m.From)) {
 		plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
@@ -540,28 +595,91 @@ func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
 type etcdProgress struct {
 	confState raftpb.ConfState
 	snapi     uint64
+	appliedt  uint64
 	appliedi  uint64
 }
 
+// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
+// and helps decouple state machine logic from Raft algorithms.
+// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
+type raftReadyHandler struct {
+	updateLeadership     func()
+	updateCommittedIndex func(uint64)
+}
+
 func (s *EtcdServer) run() {
 	snap, err := s.r.raftStorage.Snapshot()
 	if err != nil {
 		plog.Panicf("get snapshot from raft storage error: %v", err)
 	}
-	s.r.start(s)
+
+	var (
+		smu   sync.RWMutex
+		syncC <-chan time.Time
+	)
+	setSyncC := func(ch <-chan time.Time) {
+		smu.Lock()
+		syncC = ch
+		smu.Unlock()
+	}
+	getSyncC := func() (ch <-chan time.Time) {
+		smu.RLock()
+		ch = syncC
+		smu.RUnlock()
+		return
+	}
+	rh := &raftReadyHandler{
+		updateLeadership: func() {
+			if !s.isLeader() {
+				if s.lessor != nil {
+					s.lessor.Demote()
+				}
+				if s.compactor != nil {
+					s.compactor.Pause()
+				}
+				setSyncC(nil)
+			} else {
+				setSyncC(s.SyncTicker)
+				if s.compactor != nil {
+					s.compactor.Resume()
+				}
+			}
+
+			// TODO: remove the nil checking
+			// current test utility does not provide the stats
+			if s.stats != nil {
+				s.stats.BecomeLeader()
+			}
+			if s.r.td != nil {
+				s.r.td.Reset()
+			}
+		},
+		updateCommittedIndex: func(ci uint64) {
+			cci := s.getCommittedIndex()
+			if ci > cci {
+				s.setCommittedIndex(ci)
+			}
+		},
+	}
+	s.r.start(rh)
 
 	// asynchronously accept apply packets, dispatch progress in-order
 	sched := schedule.NewFIFOScheduler()
 	ep := etcdProgress{
 		confState: snap.Metadata.ConfState,
 		snapi:     snap.Metadata.Index,
+		appliedt:  snap.Metadata.Term,
 		appliedi:  snap.Metadata.Index,
 	}
 
 	defer func() {
+		s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
+		close(s.stopping)
+		s.wgMu.Unlock()
+
 		sched.Stop()
 
-		// wait for snapshots before closing raft so wal stays open
+		// wait for goroutines before closing raft so wal stays open
 		s.wg.Wait()
 
 		// must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
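The raftReadyHandler is a small callback struct, so the raft loop never imports the server's state machine. A standalone sketch of the same decoupling pattern, with illustrative names of our own:

package main

import "fmt"

// The lower layer (the "raft loop") only sees a narrow set of callbacks
// supplied by the upper layer, mirroring raftReadyHandler above.
type readyHandler struct {
	updateLeadership     func()
	updateCommittedIndex func(uint64)
}

// runLoop stands in for one pass of the raft Ready loop.
func runLoop(rh *readyHandler) {
	rh.updateLeadership()
	rh.updateCommittedIndex(42)
}

func main() {
	rh := &readyHandler{
		updateLeadership:     func() { fmt.Println("leadership changed") },
		updateCommittedIndex: func(ci uint64) { fmt.Println("committed:", ci) },
	}
	runLoop(rh)
}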
@@ -576,6 +694,9 @@ func (s *EtcdServer) run() {
 		if s.kv != nil {
 			s.kv.Close()
 		}
+		if s.authStore != nil {
+			s.authStore.Close()
+		}
 		if s.be != nil {
 			s.be.Close()
 		}
@@ -596,15 +717,30 @@ func (s *EtcdServer) run() {
 			f := func(context.Context) { s.applyAll(&ep, &ap) }
 			sched.Schedule(f)
 		case leases := <-expiredLeaseC:
-			go func() {
-				for _, l := range leases {
-					s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(l.ID)})
+			s.goAttach(func() {
+				// Increases throughput of expired leases deletion process through parallelization
+				c := make(chan struct{}, maxPendingRevokes)
+				for _, lease := range leases {
+					select {
+					case c <- struct{}{}:
+					case <-s.stopping:
+						return
+					}
+					lid := lease.ID
+					s.goAttach(func() {
+						s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)})
+						<-c
+					})
 				}
-			}()
+			})
 		case err := <-s.errorc:
 			plog.Errorf("%s", err)
 			plog.Infof("the data-dir used by this member must be removed.")
 			return
+		case <-getSyncC():
+			if s.store.HasTTLKeys() {
+				s.sync(s.Cfg.ReqTimeout())
+			}
 		case <-s.stop:
 			return
 		}
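The buffered channel above acts as a counting semaphore capping in-flight revocations at maxPendingRevokes. A self-contained sketch of that bounded fan-out pattern under assumed toy names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxPending = 16 // analogous to maxPendingRevokes
	sem := make(chan struct{}, maxPending)
	var wg sync.WaitGroup

	for id := 0; id < 100; id++ {
		sem <- struct{}{} // blocks once maxPending workers are in flight
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fmt.Println("revoking lease", id)
		}(id)
	}
	wg.Wait()
}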
@@ -622,15 +758,17 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
 		plog.Warningf("avoid queries with large range/delete range!")
 	}
 	proposalsApplied.Set(float64(ep.appliedi))
+	s.applyWait.Trigger(ep.appliedi)
 	// wait for the raft routine to finish the disk writes before triggering a
 	// snapshot. or applied index might be greater than the last index in raft
 	// storage, since the raft routine might be slower than apply routine.
 	<-apply.raftDone
 
 	s.triggerSnapshot(ep)
 	select {
 	// snapshot requested via send()
-	case m := <-s.msgSnapC:
-		merged := s.createMergedSnapshotMessage(m, ep.appliedi, ep.confState)
+	case m := <-s.r.msgSnapC:
+		merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
 		s.sendMergedSnap(merged)
 	default:
 	}
@@ -654,7 +792,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
 		plog.Panicf("get database snapshot file path error: %v", err)
 	}
 
-	fn := path.Join(s.Cfg.SnapDir(), databaseFilename)
+	fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename)
 	if err := os.Rename(snapfn, fn); err != nil {
 		plog.Panicf("rename snapshot file error: %v", err)
 	}
@@ -732,6 +870,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
 	}
 	plog.Info("finished adding peers from new cluster configuration into network...")
 
+	ep.appliedt = apply.snapshot.Metadata.Term
 	ep.appliedi = apply.snapshot.Metadata.Index
 	ep.snapi = ep.appliedi
 	ep.confState = apply.snapshot.Metadata.ConfState
@@ -753,7 +892,7 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
 		return
 	}
 	var shouldstop bool
-	if ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
+	if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
 		go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
 	}
 }
@@ -768,9 +907,62 @@ func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
 	ep.snapi = ep.appliedi
 }
 
-// Stop stops the server gracefully, and shuts down the running goroutine.
-// Stop should be called after a Start(s), otherwise it will block forever.
-func (s *EtcdServer) Stop() {
+func (s *EtcdServer) isMultiNode() bool {
+	return s.cluster != nil && len(s.cluster.MemberIDs()) > 1
+}
+
+func (s *EtcdServer) isLeader() bool {
+	return uint64(s.ID()) == s.Lead()
+}
+
+// transferLeadership transfers the leader to the given transferee.
+// TODO: maybe expose to client?
+func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error {
+	now := time.Now()
+	interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
+
+	plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
+	s.r.TransferLeadership(ctx, lead, transferee)
+	for s.Lead() != transferee {
+		select {
+		case <-ctx.Done(): // time out
+			return ErrTimeoutLeaderTransfer
+		case <-time.After(interval):
+		}
+	}
+
+	// TODO: drain all requests, or drop all messages to the old leader
+
+	plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
+	return nil
+}
+
+// TransferLeadership transfers the leader to the chosen transferee.
+func (s *EtcdServer) TransferLeadership() error {
+	if !s.isLeader() {
+		plog.Printf("skipped leadership transfer for stopping non-leader member")
+		return nil
+	}
+
+	if !s.isMultiNode() {
+		plog.Printf("skipped leadership transfer for single member cluster")
+		return nil
+	}
+
+	transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs())
+	if !ok {
+		return ErrUnhealthy
+	}
+
+	tm := s.Cfg.ReqTimeout()
+	ctx, cancel := context.WithTimeout(context.TODO(), tm)
+	err := s.transferLeadership(ctx, s.Lead(), uint64(transferee))
+	cancel()
+	return err
+}
+
+// HardStop stops the server without coordination with other members in the cluster.
+func (s *EtcdServer) HardStop() {
 	select {
 	case s.stop <- struct{}{}:
 	case <-s.done:
@@ -779,6 +971,17 @@ func (s *EtcdServer) Stop() {
 	<-s.done
 }
 
+// Stop stops the server gracefully, and shuts down the running goroutine.
+// Stop should be called after a Start(s), otherwise it will block forever.
+// When stopping leader, Stop transfers its leadership to one of its peers
+// before stopping the server.
+func (s *EtcdServer) Stop() {
+	if err := s.TransferLeadership(); err != nil {
+		plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
+	}
+	s.HardStop()
+}
+
 // ReadyNotify returns a channel that will be closed when the server
 // is ready to serve client requests
 func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
@@ -810,11 +1013,42 @@ func (s *EtcdServer) LeaderStats() []byte {
 
 func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }
 
+func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
+	if s.authStore == nil {
+		// In the context of ordinary etcd process, s.authStore will never be nil.
+		// This branch is for handling cases in server_test.go
+		return nil
+	}
+
+	// Note that this permission check is done in the API layer,
+	// so TOCTOU problem can be caused potentially in a schedule like this:
+	// update membership with user A -> revoke root role of A -> apply membership change
+	// in the state machine layer
+	// However, both of membership change and role management requires the root privilege.
+	// So careful operation by admins can prevent the problem.
+	authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
+	if err != nil {
+		return err
+	}
+
+	return s.AuthStore().IsAdminPermitted(authInfo)
+}
+
 func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error {
-	if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToAddNewMember() {
-		// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
-		// In such a case adding a new member is allowed unconditionally
-		return ErrNotEnoughStartedMembers
+	if err := s.checkMembershipOperationPermission(ctx); err != nil {
+		return err
+	}
+
+	if s.Cfg.StrictReconfigCheck {
+		// by default StrictReconfigCheck is enabled; reject new members if unhealthy
+		if !s.cluster.IsReadyToAddNewMember() {
+			plog.Warningf("not enough started members, rejecting member add %+v", memb)
+			return ErrNotEnoughStartedMembers
+		}
+		if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
+			plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
+			return ErrUnhealthy
+		}
 	}
 
 	// TODO: move Member to protobuf type
@@ -831,10 +1065,13 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro
 }
 
 func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
-	if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToRemoveMember(id) {
-		// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
-		// In such a case removing a member is allowed unconditionally
-		return ErrNotEnoughStartedMembers
+	if err := s.checkMembershipOperationPermission(ctx); err != nil {
+		return err
+	}
+
+	// by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss
+	if err := s.mayRemoveMember(types.ID(id)); err != nil {
+		return err
 	}
 
 	cc := raftpb.ConfChange{
@@ -844,9 +1081,39 @@ func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
 	return s.configure(ctx, cc)
 }
 
+func (s *EtcdServer) mayRemoveMember(id types.ID) error {
+	if !s.Cfg.StrictReconfigCheck {
+		return nil
+	}
+
+	if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
+		plog.Warningf("not enough started members, rejecting remove member %s", id)
+		return ErrNotEnoughStartedMembers
+	}
+
+	// downed member is safe to remove since it's not part of the active quorum
+	if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
+		return nil
+	}
+
+	// protect quorum if some members are down
+	m := s.cluster.Members()
+	active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
+	if (active - 1) < 1+((len(m)-1)/2) {
+		plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
+		return ErrUnhealthy
+	}
+
+	return nil
+}
+
 func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error {
-	b, err := json.Marshal(memb)
-	if err != nil {
+	b, merr := json.Marshal(memb)
+	if merr != nil {
+		return merr
+	}
+
+	if err := s.checkMembershipOperationPermission(ctx); err != nil {
 		return err
 	}
 	cc := raftpb.ConfChange{
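The quorum guard `(active - 1) < 1+((len(m)-1)/2)` rejects a removal whenever the remaining reachable members would fall below the quorum of the shrunken cluster. A small worked restatement, with our own helper name:

package main

import "fmt"

// removeIsSafe mirrors the check in mayRemoveMember: with `members`
// nodes and `active` of them reachable, removing one is safe only if
// the remaining active count still meets the quorum of members-1 nodes.
func removeIsSafe(members, active int) bool {
	quorumAfterRemoval := 1 + ((members - 1) / 2)
	return active-1 >= quorumAfterRemoval
}

func main() {
	fmt.Println(removeIsSafe(5, 5)) // true: 4 active >= quorum 3 of the remaining 4
	fmt.Println(removeIsSafe(5, 3)) // false: 2 active < quorum 3 of the remaining 4
}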
@@ -870,8 +1137,6 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }
 
 func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }
 
-func (s *EtcdServer) IsPprofEnabled() bool { return s.Cfg.EnablePprof }
-
 // configure sends a configuration change through consensus and
 // then waits for it to be applied to the server. It
 // will block until the change is performed or there is an error.
@@ -895,7 +1160,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error
 	case <-ctx.Done():
 		s.w.Trigger(cc.ID, nil) // GC wait
 		return s.parseProposeCtxErr(ctx.Err(), start)
-	case <-s.done:
+	case <-s.stopping:
 		return ErrStopped
 	}
 }
@@ -913,10 +1178,10 @@ func (s *EtcdServer) sync(timeout time.Duration) {
 	data := pbutil.MustMarshal(&req)
 	// There is no promise that node has leader when do SYNC request,
 	// so it uses goroutine to propose.
-	go func() {
+	s.goAttach(func() {
 		s.r.Propose(ctx, data)
 		cancel()
-	}()
+	})
 }
 
 // publish registers server information into the cluster. The information
@@ -954,52 +1219,11 @@ func (s *EtcdServer) publish(timeout time.Duration) {
 	}
 }
 
-// TODO: move this function into raft.go
-func (s *EtcdServer) send(ms []raftpb.Message) {
-	sentAppResp := false
-	for i := len(ms) - 1; i >= 0; i-- {
-		if s.cluster.IsIDRemoved(types.ID(ms[i].To)) {
-			ms[i].To = 0
-		}
-
-		if ms[i].Type == raftpb.MsgAppResp {
-			if sentAppResp {
-				ms[i].To = 0
-			} else {
-				sentAppResp = true
-			}
-		}
-
-		if ms[i].Type == raftpb.MsgSnap {
-			// There are two separate data store: the store for v2, and the KV for v3.
-			// The msgSnap only contains the most recent snapshot of store without KV.
-			// So we need to redirect the msgSnap to etcd server main loop for merging in the
-			// current store snapshot and KV snapshot.
-			select {
-			case s.msgSnapC <- ms[i]:
-			default:
-				// drop msgSnap if the inflight chan if full.
-			}
-			ms[i].To = 0
-		}
-		if ms[i].Type == raftpb.MsgHeartbeat {
-			ok, exceed := s.r.td.Observe(ms[i].To)
-			if !ok {
-				// TODO: limit request rate.
-				plog.Warningf("failed to send out heartbeat on time (exceeded the %dms timeout for %v)", s.Cfg.TickMs, exceed)
-				plog.Warningf("server is likely overloaded")
-			}
-		}
-	}
-
-	s.r.transport.Send(ms)
-}
-
 func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
 	atomic.AddInt64(&s.inflightSnapshots, 1)
 
 	s.r.transport.SendSnapshot(merged)
-	go func() {
+	s.goAttach(func() {
 		select {
 		case ok := <-merged.CloseNotify():
 			// delay releasing inflight snapshot for another 30 seconds to
@@ -1009,22 +1233,20 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
 			if ok {
 				select {
 				case <-time.After(releaseDelayAfterSnapshot):
-				case <-s.done:
+				case <-s.stopping:
 				}
 			}
 			atomic.AddInt64(&s.inflightSnapshots, -1)
-		case <-s.done:
+		case <-s.stopping:
 			return
 		}
-	}()
+	})
 }

 // apply takes entries received from Raft (after it has been committed) and
 // applies them to the current state of the EtcdServer.
 // The given entries should not be empty.
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
-	var applied uint64
-	var shouldstop bool
+func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
 	for i := range es {
 		e := es[i]
 		switch e.Type {
@@ -1034,16 +1256,17 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint
 			var cc raftpb.ConfChange
 			pbutil.MustUnmarshal(&cc, e.Data)
 			removedSelf, err := s.applyConfChange(cc, confState)
-			shouldstop = shouldstop || removedSelf
+			shouldStop = shouldStop || removedSelf
 			s.w.Trigger(cc.ID, err)
 		default:
 			plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
 		}
 		atomic.StoreUint64(&s.r.index, e.Index)
 		atomic.StoreUint64(&s.r.term, e.Term)
-		applied = e.Index
+		appliedt = e.Term
+		appliedi = e.Index
 	}
-	return applied, shouldstop
+	return appliedt, appliedi, shouldStop
 }

 // applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer
@@ -1054,6 +1277,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
 		s.consistIndex.setConsistentIndex(e.Index)
 		shouldApplyV3 = true
 	}
+	defer s.setAppliedIndex(e.Index)

 	// raft state machine may generate noop entry when leader confirmation.
 	// skip it in advance to avoid some potential bug in the future
@@ -1062,6 +1286,11 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
 		case s.forceVersionC <- struct{}{}:
 		default:
 		}
+		// promote lessor when the local member is leader and finished
+		// applying all entries from the last term.
+		if s.isLeader() {
+			s.lessor.Promote(s.Cfg.electionTimeout())
+		}
 		return
 	}
@@ -1088,15 +1317,26 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
 		id = raftReq.Header.ID
 	}

-	ar := s.applyV3.Apply(&raftReq)
-	s.setAppliedIndex(e.Index)
+	var ar *applyResult
+	needResult := s.w.IsRegistered(id)
+	if needResult || !noSideEffect(&raftReq) {
+		if !needResult && raftReq.Txn != nil {
+			removeNeedlessRangeReqs(raftReq.Txn)
+		}
+		ar = s.applyV3.Apply(&raftReq)
+	}
+
+	if ar == nil {
+		return
+	}
+
 	if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
 		s.w.Trigger(id, ar)
 		return
 	}

 	plog.Errorf("applying raft message exceeded backend quota")
-	go func() {
+	s.goAttach(func() {
 		a := &pb.AlarmRequest{
 			MemberID: uint64(s.ID()),
 			Action:   pb.AlarmRequest_ACTIVATE,
@@ -1105,7 +1345,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
 		r := pb.InternalRaftRequest{Alarm: a}
 		s.processInternalRaftRequest(context.TODO(), r)
 		s.w.Trigger(id, ar)
-	}()
+	})
 }

 // applyConfChange applies a ConfChange to the server. It is only
@@ -1156,11 +1396,15 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
 // TODO: non-blocking snapshot
 func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
 	clone := s.store.Clone()
+	// commit kv to write metadata (for example: consistent index) to disk.
+	// KV().commit() updates the consistent index in backend.
+	// All operations that update consistent index must be called sequentially
+	// from applyAll function.
+	// So KV().Commit() cannot run in parallel with apply. It has to be called outside
+	// the go routine created below.
+	s.KV().Commit()

-	s.wg.Add(1)
-	go func() {
-		defer s.wg.Done()
-
+	s.goAttach(func() {
 		d, err := clone.SaveNoCopy()
 		// TODO: current store will never fail to do a snapshot
 		// what should we do if the store might fail?
@@ -1176,8 +1420,6 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
 			}
 			plog.Panicf("unexpected create snapshot error %v", err)
 		}
-		// commit kv to write metadata (for example: consistent index) to disk.
-		s.KV().Commit()
 		// SaveSnap saves the snapshot and releases the locked wal files
 		// to the snapshot index.
 		if err = s.r.storage.SaveSnap(snap); err != nil {
@@ -1210,7 +1452,23 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
 			plog.Panicf("unexpected compaction error %v", err)
 		}
 		plog.Infof("compacted raft log at %d", compacti)
-	}()
+	})
+}
+
+// CutPeer drops messages to the specified peer.
+func (s *EtcdServer) CutPeer(id types.ID) {
+	tr, ok := s.r.transport.(*rafthttp.Transport)
+	if ok {
+		tr.CutPeer(id)
+	}
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (s *EtcdServer) MendPeer(id types.ID) {
+	tr, ok := s.r.transport.(*rafthttp.Transport)
+	if ok {
+		tr.MendPeer(id)
+	}
 }

 func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
@@ -1233,7 +1491,7 @@ func (s *EtcdServer) monitorVersions() {
 		select {
 		case <-s.forceVersionC:
 		case <-time.After(monitorVersionInterval):
-		case <-s.done:
+		case <-s.stopping:
 			return
 		}
|
@ -1254,18 +1512,18 @@ func (s *EtcdServer) monitorVersions() {
|
||||||
// 1. use the decided version if possible
|
// 1. use the decided version if possible
|
||||||
// 2. or use the min cluster version
|
// 2. or use the min cluster version
|
||||||
if s.cluster.Version() == nil {
|
if s.cluster.Version() == nil {
|
||||||
|
verStr := version.MinClusterVersion
|
||||||
if v != nil {
|
if v != nil {
|
||||||
go s.updateClusterVersion(v.String())
|
verStr = v.String()
|
||||||
} else {
|
|
||||||
go s.updateClusterVersion(version.MinClusterVersion)
|
|
||||||
}
|
}
|
||||||
|
s.goAttach(func() { s.updateClusterVersion(verStr) })
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// update cluster version only if the decided version is greater than
|
// update cluster version only if the decided version is greater than
|
||||||
// the current cluster version
|
// the current cluster version
|
||||||
if v != nil && s.cluster.Version().LessThan(*v) {
|
if v != nil && s.cluster.Version().LessThan(*v) {
|
||||||
go s.updateClusterVersion(v.String())
|
s.goAttach(func() { s.updateClusterVersion(v.String()) })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@@ -1355,3 +1613,31 @@ func (s *EtcdServer) getAppliedIndex() uint64 {
 func (s *EtcdServer) setAppliedIndex(v uint64) {
 	atomic.StoreUint64(&s.appliedIndex, v)
 }
+
+func (s *EtcdServer) getCommittedIndex() uint64 {
+	return atomic.LoadUint64(&s.committedIndex)
+}
+
+func (s *EtcdServer) setCommittedIndex(v uint64) {
+	atomic.StoreUint64(&s.committedIndex, v)
+}
+
+// goAttach creates a goroutine on a given function and tracks it using
+// the etcdserver waitgroup.
+func (s *EtcdServer) goAttach(f func()) {
+	s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
+	defer s.wgMu.RUnlock()
+	select {
+	case <-s.stopping:
+		plog.Warning("server has stopped (skipping goAttach)")
+		return
+	default:
+	}
+
+	// now safe to add since waitgroup wait has not started yet
+	s.wg.Add(1)
+	go func() {
+		defer s.wg.Done()
+		f()
+	}()
+}
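
The goAttach helper added above is the pattern behind every go func() → s.goAttach(...) change in this diff: goroutine creation is gated on s.stopping under a read lock, so shutdown can take the write lock, close the channel, and then wait on the group without racing Add against Wait. A minimal standalone sketch of the same pattern (the server type here is a stand-in, not etcd's):

package main

import (
	"fmt"
	"sync"
	"time"
)

type server struct {
	wgMu     sync.RWMutex
	wg       sync.WaitGroup
	stopping chan struct{}
}

// goAttach launches f only if the server is not stopping, and tracks
// it on the waitgroup so Stop can wait for all attached goroutines.
func (s *server) goAttach(f func()) {
	s.wgMu.RLock() // blocks while Stop holds the write lock to close(stopping)
	defer s.wgMu.RUnlock()
	select {
	case <-s.stopping:
		return // too late; shutdown already started
	default:
	}
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		f()
	}()
}

func (s *server) Stop() {
	s.wgMu.Lock() // no new goAttach can race with the close below
	close(s.stopping)
	s.wgMu.Unlock()
	s.wg.Wait() // all attached goroutines have exited here
}

func main() {
	s := &server{stopping: make(chan struct{})}
	s.goAttach(func() { time.Sleep(10 * time.Millisecond); fmt.Println("work done") })
	s.Stop()
	s.goAttach(func() { fmt.Println("never runs") })
}
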
@@ -16,7 +16,6 @@ package etcdserver

 import (
 	"io"
-	"log"

 	"github.com/coreos/etcd/mvcc/backend"
 	"github.com/coreos/etcd/raft/raftpb"
@@ -26,12 +25,7 @@ import (
 // createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
 // a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
 // as ReadCloser.
-func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapi uint64, confState raftpb.ConfState) snap.Message {
-	snapt, err := s.r.raftStorage.Term(snapi)
-	if err != nil {
-		log.Panicf("get term should never fail: %v", err)
-	}
-
+func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
 	// get a snapshot of v2 store as []byte
 	clone := s.store.Clone()
 	d, err := clone.SaveNoCopy()
@@ -123,17 +123,11 @@ func (ss *ServerStats) SendAppendReq(reqSize int) {
 	ss.Lock()
 	defer ss.Unlock()

-	now := time.Now()
-
-	if ss.State != raft.StateLeader {
-		ss.State = raft.StateLeader
-		ss.LeaderInfo.Name = ss.ID
-		ss.LeaderInfo.StartTime = now
-	}
+	ss.becomeLeader()

 	ss.sendRateQueue.Insert(
 		&RequestStats{
-			SendingTime: now,
+			SendingTime: time.Now(),
 			Size:        reqSize,
 		},
 	)
@@ -144,7 +138,10 @@ func (ss *ServerStats) SendAppendReq(reqSize int) {
 func (ss *ServerStats) BecomeLeader() {
 	ss.Lock()
 	defer ss.Unlock()
+	ss.becomeLeader()
+}
+
+func (ss *ServerStats) becomeLeader() {
 	if ss.State != raft.StateLeader {
 		ss.State = raft.StateLeader
 		ss.LeaderInfo.Name = ss.ID
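
The refactor above splits the exported, locking entry point from an unexported helper that assumes the lock is held, letting SendAppendReq and BecomeLeader share the leader-transition logic without double-locking. A minimal standalone sketch of the idiom (stats is a stand-in type, not etcd's ServerStats):

package main

import (
	"fmt"
	"sync"
)

type stats struct {
	mu     sync.Mutex
	leader bool
}

// BecomeLeader is the exported, locking entry point.
func (s *stats) BecomeLeader() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.becomeLeader()
}

// becomeLeader assumes s.mu is held; callers already inside the lock
// call it directly to avoid self-deadlock on the non-reentrant mutex.
func (s *stats) becomeLeader() {
	if !s.leader {
		s.leader = true
	}
}

func (s *stats) SendAppendReq() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.becomeLeader() // no re-lock: the helper assumes the lock
}

func main() {
	s := &stats{}
	s.SendAppendReq()
	fmt.Println("leader:", s.leader)
}
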
@@ -18,7 +18,7 @@ package stats
 import "github.com/coreos/pkg/capnslog"

 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "stats")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats")
 )

 type Stats interface {
@@ -16,16 +16,12 @@ package etcdserver

 import (
 	"io"
-	"os"
-	"path"

 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"github.com/coreos/etcd/pkg/fileutil"
 	"github.com/coreos/etcd/pkg/pbutil"
 	"github.com/coreos/etcd/pkg/types"
 	"github.com/coreos/etcd/raft/raftpb"
 	"github.com/coreos/etcd/snap"
-	"github.com/coreos/etcd/version"
 	"github.com/coreos/etcd/wal"
 	"github.com/coreos/etcd/wal/walpb"
 )
@@ -103,41 +99,3 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID,
 	cid = types.ID(metadata.ClusterID)
 	return
 }
-
-// upgradeDataDir converts an older version of the etcdServer data to the newest version.
-// It must ensure that, after upgrading, the most recent version is present.
-func upgradeDataDir(baseDataDir string, name string, ver version.DataDirVersion) error {
-	switch ver {
-	case version.DataDir2_0:
-		err := makeMemberDir(baseDataDir)
-		if err != nil {
-			return err
-		}
-		fallthrough
-	case version.DataDir2_0_1:
-		fallthrough
-	default:
-	}
-	return nil
-}
-
-func makeMemberDir(dir string) error {
-	membdir := path.Join(dir, "member")
-	_, err := os.Stat(membdir)
-	switch {
-	case err == nil:
-		return nil
-	case !os.IsNotExist(err):
-		return err
-	}
-	if err := fileutil.CreateDirAll(membdir); err != nil {
-		return err
-	}
-	names := []string{"snap", "wal"}
-	for _, name := range names {
-		if err := os.Rename(path.Join(dir, name), path.Join(membdir, name)); err != nil {
-			return err
-		}
-	}
-	return nil
-}
@@ -25,13 +25,7 @@ import (
 // isConnectedToQuorumSince checks whether the local member is connected to the
 // quorum of the cluster since the given time.
 func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
-	var connectedNum int
-	for _, m := range members {
-		if m.ID == self || isConnectedSince(transport, since, m.ID) {
-			connectedNum++
-		}
-	}
-	return connectedNum >= (len(members)+1)/2
+	return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
 }

 // isConnectedSince checks whether the local member is connected to the
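
Beyond delegating to numConnectedSince, the rewrite fixes the threshold: the removed (len(members)+1)/2 understates a strict majority for even-sized clusters, while (len(members)/2)+1 does not. A quick standalone check:

package main

import "fmt"

func main() {
	for _, n := range []int{1, 2, 3, 4, 5, 6} {
		old := (n + 1) / 2   // threshold in the removed code
		fixed := (n / 2) + 1 // strict majority used by the new code
		fmt.Printf("n=%d old=%d fixed=%d\n", n, old, fixed)
	}
	// For n=4 the old threshold is 2, but a strict majority is 3;
	// the two expressions only agree on odd cluster sizes.
}
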
@@ -40,3 +34,64 @@ func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote ty
 	t := transport.ActiveSince(remote)
 	return !t.IsZero() && t.Before(since)
 }
+
+// isConnectedFullySince checks whether the local member is connected to all
+// members in the cluster since the given time.
+func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+	return numConnectedSince(transport, since, self, members) == len(members)
+}
+
+// numConnectedSince counts how many members are connected to the local member
+// since the given time.
+func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
+	connectedNum := 0
+	for _, m := range members {
+		if m.ID == self || isConnectedSince(transport, since, m.ID) {
+			connectedNum++
+		}
+	}
+	return connectedNum
+}
+
+// longestConnected chooses the member with longest active-since-time.
+// It returns false, if nothing is active.
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
+	var longest types.ID
+	var oldest time.Time
+	for _, id := range membs {
+		tm := tp.ActiveSince(id)
+		if tm.IsZero() { // inactive
+			continue
+		}
+
+		if oldest.IsZero() { // first longest candidate
+			oldest = tm
+			longest = id
+		}
+
+		if tm.Before(oldest) {
+			oldest = tm
+			longest = id
+		}
+	}
+	if uint64(longest) == 0 {
+		return longest, false
+	}
+	return longest, true
+}
+
+type notifier struct {
+	c   chan struct{}
+	err error
+}
+
+func newNotifier() *notifier {
+	return &notifier{
+		c: make(chan struct{}, 0),
+	}
+}
+
+func (nc *notifier) notify(err error) {
+	nc.err = err
+	close(nc.c)
+}
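
The notifier added above broadcasts one error to any number of waiters by closing its channel; the err write happens before the close, so every receiver that unblocks on <-nc.c may read it safely. A standalone usage sketch:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier { return &notifier{c: make(chan struct{})} }

// notify publishes err to all waiters; the close acts as a broadcast.
func (nc *notifier) notify(err error) {
	nc.err = err // happens-before the close, so waiters read it safely
	close(nc.c)
}

func main() {
	nc := newNotifier()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			<-nc.c // all three unblock on the single close
			fmt.Printf("waiter %d got: %v\n", i, nc.err)
		}(i)
	}
	nc.notify(errors.New("read index timed out"))
	wg.Wait()
}
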
@@ -68,7 +68,7 @@ func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Res
 		proposalsFailed.Inc()
 		a.s.w.Trigger(r.ID, nil) // GC wait
 		return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
-	case <-a.s.done:
+	case <-a.s.stopping:
 	}
 	return Response{}, ErrStopped
 }
@@ -15,17 +15,20 @@
 package etcdserver

 import (
-	"strconv"
-	"strings"
+	"bytes"
+	"encoding/binary"
 	"time"

+	"github.com/coreos/etcd/auth"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/etcdserver/membership"
 	"github.com/coreos/etcd/lease"
 	"github.com/coreos/etcd/lease/leasehttp"
 	"github.com/coreos/etcd/mvcc"
+	"github.com/coreos/etcd/raft"
+
+	"github.com/coreos/go-semver/semver"
 	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
 )
@@ -35,8 +38,15 @@ const (
 	// specify a large value might end up with shooting in the foot.
 	maxRequestBytes = 1.5 * 1024 * 1024

-	// max timeout for waiting a v3 request to go through raft.
-	maxV3RequestTimeout = 5 * time.Second
+	// In the health case, there might be a small gap (10s of entries) between
+	// the applied index and committed index.
+	// However, if the committed entries are very heavy to apply, the gap might grow.
+	// We should stop accepting new proposals if the gap growing to a certain point.
+	maxGapBetweenApplyAndCommitIndex = 5000
+)
+
+var (
+	newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0"))
 )

 type RaftKV interface {
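
maxGapBetweenApplyAndCommitIndex caps how far the raft commit index may run ahead of the apply loop before new proposals are refused with ErrTooManyRequests (see processInternalRaftRequestOnce later in this diff). A standalone sketch of the admission check (names are illustrative stand-ins):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

const maxGap = 5000 // mirrors maxGapBetweenApplyAndCommitIndex

var errTooManyRequests = errors.New("too many requests")

type indexes struct{ applied, committed uint64 }

// admit rejects a proposal when the apply loop has fallen too far
// behind the raft commit index, shedding load instead of queueing.
func (ix *indexes) admit() error {
	ai := atomic.LoadUint64(&ix.applied)
	ci := atomic.LoadUint64(&ix.committed)
	if ci > ai+maxGap {
		return errTooManyRequests
	}
	return nil
}

func main() {
	ix := &indexes{applied: 100, committed: 6000}
	fmt.Println(ix.admit()) // rejected: gap is 5900 > 5000
	atomic.StoreUint64(&ix.applied, 5000)
	fmt.Println(ix.admit()) // <nil>: gap back under the limit
}
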
@@ -56,6 +66,9 @@ type Lessor interface {
 	// LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error
 	// is returned.
 	LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)
+
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
 }

 type Authenticator interface {
@@ -78,22 +91,44 @@ type Authenticator interface {
 }

 func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
-	var result *applyResult
-	var err error
-
-	if r.Serializable {
-		var user string
-		user, err = s.usernameFromCtx(ctx)
-		if err != nil {
-			return nil, err
-		}
-		result = s.applyV3.Apply(
-			&pb.InternalRaftRequest{
-				Header: &pb.RequestHeader{Username: user},
-				Range:  r})
-	} else {
-		result, err = s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
-	}
+	// TODO: remove this checking when we release etcd 3.2
+	if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
+		return s.legacyRange(ctx, r)
+	}
+
+	if !r.Serializable {
+		err := s.linearizableReadNotify(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+	var resp *pb.RangeResponse
+	var err error
+	chk := func(ai *auth.AuthInfo) error {
+		return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
+	}
+	get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
+	if serr := s.doSerialize(ctx, chk, get); serr != nil {
+		return nil, serr
+	}
+	return resp, err
+}
+
+// TODO: remove this func when we release etcd 3.2
+func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+	if r.Serializable {
+		var resp *pb.RangeResponse
+		var err error
+		chk := func(ai *auth.AuthInfo) error {
+			return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
+		}
+		get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
+		if serr := s.doSerialize(ctx, chk, get); serr != nil {
+			return nil, serr
+		}
+		return resp, err
+	}
+	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
 	if err != nil {
 		return nil, err
 	}
@@ -126,21 +161,54 @@ func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest)
 }

 func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
-	var result *applyResult
-	var err error
-
-	if isTxnSerializable(r) {
-		user, err := s.usernameFromCtx(ctx)
-		if err != nil {
-			return nil, err
-		}
-		result = s.applyV3.Apply(
-			&pb.InternalRaftRequest{
-				Header: &pb.RequestHeader{Username: user},
-				Txn:    r})
-	} else {
-		result, err = s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
-	}
+	// TODO: remove this checking when we release etcd 3.2
+	if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
+		return s.legacyTxn(ctx, r)
+	}
+
+	if isTxnReadonly(r) {
+		if !isTxnSerializable(r) {
+			err := s.linearizableReadNotify(ctx)
+			if err != nil {
+				return nil, err
+			}
+		}
+		var resp *pb.TxnResponse
+		var err error
+		chk := func(ai *auth.AuthInfo) error {
+			return checkTxnAuth(s.authStore, ai, r)
+		}
+		get := func() { resp, err = s.applyV3Base.Txn(r) }
+		if serr := s.doSerialize(ctx, chk, get); serr != nil {
+			return nil, serr
+		}
+		return resp, err
+	}
+	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
+	if err != nil {
+		return nil, err
+	}
+	if result.err != nil {
+		return nil, result.err
+	}
+	return result.resp.(*pb.TxnResponse), nil
+}
+
+// TODO: remove this func when we release etcd 3.2
+func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+	if isTxnSerializable(r) {
+		var resp *pb.TxnResponse
+		var err error
+		chk := func(ai *auth.AuthInfo) error {
+			return checkTxnAuth(s.authStore, ai, r)
+		}
+		get := func() { resp, err = s.applyV3Base.Txn(r) }
+		if serr := s.doSerialize(ctx, chk, get); serr != nil {
+			return nil, serr
+		}
+		return resp, err
+	}
+	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
 	if err != nil {
 		return nil, err
 	}
@@ -164,8 +232,22 @@ func isTxnSerializable(r *pb.TxnRequest) bool {
 	return true
 }

+func isTxnReadonly(r *pb.TxnRequest) bool {
+	for _, u := range r.Success {
+		if r := u.GetRequestRange(); r == nil {
+			return false
+		}
+	}
+	for _, u := range r.Failure {
+		if r := u.GetRequestRange(); r == nil {
+			return false
+		}
+	}
+	return true
+}
+
 func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
-	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Compaction: r})
+	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
 	if r.Physical && result != nil && result.physc != nil {
 		<-result.physc
 		// The compaction is done deleting keys; the hash is now settled
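
isTxnReadonly above gates the new read-only fast path in Txn: a transaction qualifies only if every op on both branches is a range request. A standalone sketch of the classification with stand-in types (op and txn are illustrative, not etcd's protobuf types):

package main

import "fmt"

type op struct{ isRange bool }

type txn struct{ success, failure []op }

// readonly reports whether every op on both branches is a read,
// mirroring what isTxnReadonly checks via GetRequestRange.
func readonly(t txn) bool {
	for _, u := range append(t.success, t.failure...) {
		if !u.isRange {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(readonly(txn{success: []op{{true}}, failure: []op{{true}}})) // true: may skip raft
	fmt.Println(readonly(txn{success: []op{{true}, {false}}}))               // false: must go through raft
}
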
@@ -198,7 +280,7 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
 		// only use positive int64 id's
 		r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
 	}
-	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseGrant: r})
+	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
 	if err != nil {
 		return nil, err
 	}
@@ -209,7 +291,7 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
 }

 func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
-	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
+	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
 	if err != nil {
 		return nil, err
 	}
@@ -221,14 +303,13 @@ func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest)

 func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
 	ttl, err := s.lessor.Renew(id)
-	if err == nil {
+	if err == nil { // already requested to primary lessor(leader)
 		return ttl, nil
 	}
 	if err != lease.ErrNotPrimary {
 		return -1, err
 	}

-	// renewals don't go through raft; forward to leader manually
 	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
 	defer cancel()
@@ -239,7 +320,7 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
 		return -1, lerr
 	}
 	for _, url := range leader.PeerURLs {
-		lurl := url + "/leases"
+		lurl := url + leasehttp.LeasePrefix
 		ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
 		if err == nil || err == lease.ErrLeaseNotFound {
 			return ttl, err
@@ -249,6 +330,49 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
 	return -1, ErrTimeout
 }

+func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+	if s.Leader() == s.ID() {
+		// primary; timetolive directly from leader
+		le := s.lessor.Lookup(lease.LeaseID(r.ID))
+		if le == nil {
+			return nil, lease.ErrLeaseNotFound
+		}
+		// TODO: fill out ResponseHeader
+		resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
+		if r.Keys {
+			ks := le.Keys()
+			kbs := make([][]byte, len(ks))
+			for i := range ks {
+				kbs[i] = []byte(ks[i])
+			}
+			resp.Keys = kbs
+		}
+		return resp, nil
+	}
+
+	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+	defer cancel()
+
+	// forward to leader
+	for cctx.Err() == nil {
+		leader, err := s.waitLeader(cctx)
+		if err != nil {
+			return nil, err
+		}
+		for _, url := range leader.PeerURLs {
+			lurl := url + leasehttp.LeaseInternalPrefix
+			resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
+			if err == nil {
+				return resp.LeaseTimeToLiveResponse, nil
+			}
+			if err == lease.ErrLeaseNotFound {
+				return nil, err
+			}
+		}
+	}
+	return nil, ErrTimeout
+}
+
 func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
 	leader := s.cluster.Member(s.Leader())
 	for leader == nil {
@@ -257,7 +381,7 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
 		select {
 		case <-time.After(dur):
 			leader = s.cluster.Member(s.Leader())
-		case <-s.done:
+		case <-s.stopping:
 			return nil, ErrStopped
 		case <-ctx.Done():
 			return nil, ErrNoLeader
@@ -270,7 +394,7 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
 }

 func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
-	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Alarm: r})
+	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
 	if err != nil {
 		return nil, err
 	}
@@ -281,7 +405,7 @@ func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmRe
 }

 func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
-	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthEnable: r})
+	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
 	if err != nil {
 		return nil, err
 	}
@@ -303,24 +427,47 @@ func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest)
 }

 func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
-	st, err := s.AuthStore().GenSimpleToken()
+	var result *applyResult
+
+	err := s.linearizableReadNotify(ctx)
 	if err != nil {
 		return nil, err
 	}

-	internalReq := &pb.InternalAuthenticateRequest{
-		Name:        r.Name,
-		Password:    r.Password,
-		SimpleToken: st,
+	for {
+		checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
+		if err != nil {
+			plog.Errorf("invalid authentication request to user %s was issued", r.Name)
+			return nil, err
+		}
+
+		st, err := s.AuthStore().GenSimpleToken()
+		if err != nil {
+			return nil, err
+		}
+
+		internalReq := &pb.InternalAuthenticateRequest{
+			Name:        r.Name,
+			Password:    r.Password,
+			SimpleToken: st,
+		}
+
+		result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
+		if err != nil {
+			return nil, err
+		}
+		if result.err != nil {
+			return nil, result.err
+		}
+
+		if checkedRevision != s.AuthStore().Revision() {
+			plog.Infof("revision when password checked is obsolete, retrying")
+			continue
+		}
+
+		break
 	}

-	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
-	if err != nil {
-		return nil, err
-	}
-	if result.err != nil {
-		return nil, result.err
-	}
 	return result.resp.(*pb.AuthenticateResponse), nil
 }
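
The rewritten Authenticate checks the password outside raft, then verifies after the proposal applies that the auth store revision has not moved, retrying if a concurrent auth mutation invalidated the check. A standalone sketch of that optimistic retry (authStore here is a stand-in, not etcd's):

package main

import "fmt"

// authStore is a stand-in whose revision bumps on every mutation.
type authStore struct{ rev uint64 }

func (a *authStore) check() uint64 { return a.rev } // revision observed at check time
func (a *authStore) mutate()       { a.rev++ }

func main() {
	a := &authStore{rev: 7}
	for attempt := 1; ; attempt++ {
		checked := a.check()
		if attempt == 1 {
			a.mutate() // a concurrent change lands between check and apply
		}
		if checked != a.rev {
			fmt.Println("revision moved, retrying") // mirrors the plog.Infof retry
			continue
		}
		fmt.Printf("authenticated on attempt %d at revision %d\n", attempt, a.rev)
		break
	}
}
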
@@ -467,69 +614,52 @@ func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest
 	return result.resp.(*pb.AuthRoleDeleteResponse), nil
 }

-func (s *EtcdServer) isValidSimpleToken(token string) bool {
-	splitted := strings.Split(token, ".")
-	if len(splitted) != 2 {
-		return false
-	}
-	index, err := strconv.Atoi(splitted[1])
-	if err != nil {
-		return false
-	}
-
-	// CAUTION: below index synchronization is required because this node
-	// might not receive and apply the log entry of Authenticate() RPC.
-	authApplied := false
-	for i := 0; i < 10; i++ {
-		if uint64(index) <= s.getAppliedIndex() {
-			authApplied = true
-			break
-		}
-		time.Sleep(100 * time.Millisecond)
-	}
-
-	if !authApplied {
-		plog.Errorf("timeout of waiting Authenticate() RPC")
-		return false
-	}
-
-	return true
-}
-
-func (s *EtcdServer) usernameFromCtx(ctx context.Context) (string, error) {
-	md, ok := metadata.FromContext(ctx)
-	if !ok {
-		return "", nil
-	}
-
-	ts, tok := md["token"]
-	if !tok {
-		return "", nil
-	}
-
-	token := ts[0]
-	if !s.isValidSimpleToken(token) {
-		return "", ErrInvalidAuthToken
-	}
-
-	username, uok := s.AuthStore().UsernameFromToken(token)
-	if !uok {
-		plog.Warningf("invalid auth token: %s", token)
-		return "", ErrInvalidAuthToken
-	}
-	return username, nil
-}
-
-func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
+// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
+func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
+	for {
+		ai, err := s.AuthStore().AuthInfoFromCtx(ctx)
+		if err != nil {
+			return err
+		}
+		if ai == nil {
+			// chk expects non-nil AuthInfo; use empty credentials
+			ai = &auth.AuthInfo{}
+		}
+		if err = chk(ai); err != nil {
+			if err == auth.ErrAuthOldRevision {
+				continue
+			}
+			return err
+		}
+		// fetch response for serialized request
+		get()
+		// empty credentials or current auth info means no need to retry
+		if ai.Revision == 0 || ai.Revision == s.authStore.Revision() {
+			return nil
+		}
+		// avoid TOCTOU error, retry of the request is required.
+	}
+}
+
+func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
+	ai := s.getAppliedIndex()
+	ci := s.getCommittedIndex()
+	if ci > ai+maxGapBetweenApplyAndCommitIndex {
+		return nil, ErrTooManyRequests
+	}
+
 	r.Header = &pb.RequestHeader{
 		ID: s.reqIDGen.Next(),
 	}
-	username, err := s.usernameFromCtx(ctx)
+
+	authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
 	if err != nil {
 		return nil, err
 	}
-	r.Header.Username = username
+	if authInfo != nil {
+		r.Header.Username = authInfo.Username
+		r.Header.AuthRevision = authInfo.Revision
+	}

 	data, err := r.Marshal()
 	if err != nil {
|
||||||
}
|
}
|
||||||
ch := s.w.Register(id)
|
ch := s.w.Register(id)
|
||||||
|
|
||||||
cctx, cancel := context.WithTimeout(ctx, maxV3RequestTimeout)
|
cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
|
|
@@ -566,5 +696,109 @@ func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.Intern
 	}
 }

+func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
+	var result *applyResult
+	var err error
+	for {
+		result, err = s.processInternalRaftRequestOnce(ctx, r)
+		if err != auth.ErrAuthOldRevision {
+			break
+		}
+	}
+
+	return result, err
+}
+
 // Watchable returns a watchable interface attached to the etcdserver.
 func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
+
+func (s *EtcdServer) linearizableReadLoop() {
+	var rs raft.ReadState
+
+	for {
+		ctx := make([]byte, 8)
+		binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next())
+
+		select {
+		case <-s.readwaitc:
+		case <-s.stopping:
+			return
+		}
+
+		nextnr := newNotifier()
+
+		s.readMu.Lock()
+		nr := s.readNotifier
+		s.readNotifier = nextnr
+		s.readMu.Unlock()
+
+		cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+		if err := s.r.ReadIndex(cctx, ctx); err != nil {
+			cancel()
+			if err == raft.ErrStopped {
+				return
+			}
+			plog.Errorf("failed to get read index from raft: %v", err)
+			nr.notify(err)
+			continue
+		}
+		cancel()
+
+		var (
+			timeout bool
+			done    bool
+		)
+		for !timeout && !done {
+			select {
+			case rs = <-s.r.readStateC:
+				done = bytes.Equal(rs.RequestCtx, ctx)
+				if !done {
+					// a previous request might time out. now we should ignore the response of it and
+					// continue waiting for the response of the current requests.
+					plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx)
+				}
+			case <-time.After(s.Cfg.ReqTimeout()):
+				plog.Warningf("timed out waiting for read index response")
+				nr.notify(ErrTimeout)
+				timeout = true
+			case <-s.stopping:
+				return
+			}
+		}
+		if !done {
+			continue
+		}
+
+		if ai := s.getAppliedIndex(); ai < rs.Index {
+			select {
+			case <-s.applyWait.Wait(rs.Index):
+			case <-s.stopping:
+				return
+			}
+		}
+		// unblock all l-reads requested at indices before rs.Index
+		nr.notify(nil)
+	}
+}
+
+func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
+	s.readMu.RLock()
+	nc := s.readNotifier
+	s.readMu.RUnlock()
+
+	// signal linearizable loop for current notify if it hasn't been already
+	select {
+	case s.readwaitc <- struct{}{}:
+	default:
+	}
+
+	// wait for read state notification
+	select {
+	case <-nc.c:
+		return nc.err
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-s.done:
+		return ErrStopped
+	}
+}
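
linearizableReadLoop batches concurrent readers: each caller grabs the current notifier and pokes readwaitc, one ReadIndex round serves the whole batch, and a single notify releases it. A condensed standalone sketch of the batching idea (the sleep stands in for the ReadIndex round and apply wait; none of this is etcd API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier { return &notifier{c: make(chan struct{})} }

func main() {
	var mu sync.RWMutex
	cur := newNotifier()
	readwaitc := make(chan struct{}, 1)

	// the single read loop: one "ReadIndex round" per wakeup serves
	// every reader registered on the current notifier.
	go func() {
		for range readwaitc {
			mu.Lock()
			nr := cur
			cur = newNotifier() // later readers join the next round
			mu.Unlock()
			time.Sleep(5 * time.Millisecond) // stands in for ReadIndex + apply wait
			nr.err = nil
			close(nr.c) // release the whole batch at once
		}
	}()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			mu.RLock()
			nc := cur
			mu.RUnlock()
			select { // signal the loop if no one has yet
			case readwaitc <- struct{}{}:
			default:
			}
			<-nc.c
			fmt.Printf("reader %d served, err=%v\n", i, nc.err)
		}(i)
	}
	wg.Wait()
}
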
@@ -39,7 +39,8 @@ type bridge struct {

 func newBridge(addr string) (*bridge, error) {
 	b := &bridge{
-		inaddr:  addr + ".bridge",
+		// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
+		inaddr:  addr + "0",
 		outaddr: addr,
 		conns:   make(map[*bridgeConn]struct{}),
 		stopc:   make(chan struct{}, 1),
@@ -276,12 +276,18 @@ func (c *cluster) AddMember(t *testing.T) {
 }

 func (c *cluster) RemoveMember(t *testing.T, id uint64) {
+	if err := c.removeMember(t, id); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func (c *cluster) removeMember(t *testing.T, id uint64) error {
 	// send remove request to the cluster
 	cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
 	ma := client.NewMembersAPI(cc)
 	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
 	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
-		t.Fatalf("unexpected remove error %v", err)
+		return err
 	}
 	cancel()
 	newMembers := make([]*member, 0)
@@ -302,6 +308,7 @@ func (c *cluster) RemoveMember(t *testing.T, id uint64) {
 	}
 	c.Members = newMembers
 	c.waitMembersMatch(t, c.HTTPMembers())
+	return nil
 }

 func (c *cluster) Terminate(t *testing.T) {
@@ -329,6 +336,7 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {

 func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) }

+// waitLeader waits until given members agree on the same leader.
 func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
 	possibleLead := make(map[uint64]bool)
 	var lead uint64
@@ -362,6 +370,28 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
 	return -1
 }

+func (c *cluster) WaitNoLeader(t *testing.T) { c.waitNoLeader(t, c.Members) }
+
+// waitNoLeader waits until given members lose leader.
+func (c *cluster) waitNoLeader(t *testing.T, membs []*member) {
+	noLeader := false
+	for !noLeader {
+		noLeader = true
+		for _, m := range membs {
+			select {
+			case <-m.s.StopNotify():
+				continue
+			default:
+			}
+			if m.s.Lead() != 0 {
+				noLeader = false
+				time.Sleep(10 * tickDuration)
+				break
+			}
+		}
+	}
+}
+
 func (c *cluster) waitVersion() {
 	for _, m := range c.Members {
 		for {
@@ -374,7 +404,7 @@ func (c *cluster) waitVersion() {
 }

 func (c *cluster) name(i int) string {
-	return fmt.Sprint("node", i)
+	return fmt.Sprint(i)
 }

 // isMembersEqual checks whether two members equal except ID field.
@@ -390,7 +420,8 @@ func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {

 func newLocalListener(t *testing.T) net.Listener {
 	c := atomic.AddInt64(&localListenCount, 1)
-	addr := fmt.Sprintf("127.0.0.1:%d.%d.sock", c+basePort, os.Getpid())
+	// Go 1.8+ allows only numbers in port
+	addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid())
 	return NewListenerWithAddr(t, addr)
 }
@@ -418,6 +449,8 @@ type member struct {
 	grpcServer *grpc.Server
 	grpcAddr   string
 	grpcBridge *bridge
+
+	keepDataDirTerminate bool
 }

 func (m *member) GRPCAddr() string { return m.grpcAddr }
@@ -480,7 +513,7 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
 // listenGRPC starts a grpc server over a unix domain socket on the member
 func (m *member) listenGRPC() error {
 	// prefix with localhost so cert has right domain
-	m.grpcAddr = "localhost:" + m.Name + ".sock"
+	m.grpcAddr = "localhost:" + m.Name
 	l, err := transport.NewUnixListener(m.grpcAddr)
 	if err != nil {
 		return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
@@ -495,6 +528,10 @@ func (m *member) listenGRPC() error {
 	return nil
 }

+func (m *member) electionTimeout() time.Duration {
+	return time.Duration(m.s.Cfg.ElectionTicks) * time.Millisecond
+}
+
 func (m *member) DropConnections() { m.grpcBridge.Reset() }

 // NewClientV3 creates a new grpc client connection to the member
@@ -515,7 +552,7 @@ func NewClientV3(m *member) (*clientv3.Client, error) {
 		}
 		cfg.TLS = tls
 	}
-	return clientv3.New(cfg)
+	return newClientV3(cfg)
 }

 // Clone returns a member with the same server configuration. The returned
@@ -653,7 +690,7 @@ func (m *member) Close() {
 		m.grpcServer.Stop()
 		m.grpcServer = nil
 	}
-	m.s.Stop()
+	m.s.HardStop()
 	for _, hs := range m.hss {
 		hs.CloseClientConnections()
 		hs.Close()
@@ -668,6 +705,15 @@ func (m *member) Stop(t *testing.T) {
 	plog.Printf("stopped %s (%s)", m.Name, m.grpcAddr)
 }

+// checkLeaderTransition waits for leader transition, returning the new leader ID.
+func checkLeaderTransition(t *testing.T, m *member, oldLead uint64) uint64 {
+	interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond
+	for m.s.Lead() == 0 || (m.s.Lead() == oldLead) {
+		time.Sleep(interval)
+	}
+	return m.s.Lead()
+}
+
 // StopNotify unblocks when a member stop completes
 func (m *member) StopNotify() <-chan struct{} {
 	return m.s.StopNotify()
@@ -702,12 +748,56 @@ func (m *member) Restart(t *testing.T) error {
 func (m *member) Terminate(t *testing.T) {
 	plog.Printf("terminating %s (%s)", m.Name, m.grpcAddr)
 	m.Close()
-	if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
-		t.Fatal(err)
+	if !m.keepDataDirTerminate {
+		if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
+			t.Fatal(err)
+		}
 	}
 	plog.Printf("terminated %s (%s)", m.Name, m.grpcAddr)
 }

+// Metric gets the metric value for a member
+func (m *member) Metric(metricName string) (string, error) {
+	cfgtls := transport.TLSInfo{}
+	tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
+	if err != nil {
+		return "", err
+	}
+	cli := &http.Client{Transport: tr}
+	resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	b, rerr := ioutil.ReadAll(resp.Body)
+	if rerr != nil {
+		return "", rerr
+	}
+	lines := strings.Split(string(b), "\n")
+	for _, l := range lines {
+		if strings.HasPrefix(l, metricName) {
+			return strings.Split(l, " ")[1], nil
+		}
+	}
+	return "", nil
+}
+
+// InjectPartition drops connections from m to others, vice versa.
+func (m *member) InjectPartition(t *testing.T, others []*member) {
+	for _, other := range others {
+		m.s.CutPeer(other.s.ID())
+		other.s.CutPeer(m.s.ID())
+	}
+}
+
+// RecoverPartition recovers connections from m to others, vice versa.
+func (m *member) RecoverPartition(t *testing.T, others []*member) {
+	for _, other := range others {
+		m.s.MendPeer(other.s.ID())
+		other.s.MendPeer(m.s.ID())
+	}
+}
+
 func MustNewHTTPClient(t *testing.T, eps []string, tls *transport.TLSInfo) client.Client {
 	cfgtls := transport.TLSInfo{}
 	if tls != nil {
@ -803,14 +893,6 @@ type grpcAPI struct {
|
||||||
Watch pb.WatchClient
|
Watch pb.WatchClient
|
||||||
// Maintenance is the maintenance API for the client's connection.
|
// Maintenance is the maintenance API for the client's connection.
|
||||||
Maintenance pb.MaintenanceClient
|
Maintenance pb.MaintenanceClient
|
||||||
}
|
// Auth is the authentication API for the client's connection.
|
||||||
|
Auth pb.AuthClient
|
||||||
func toGRPC(c *clientv3.Client) grpcAPI {
|
|
||||||
return grpcAPI{
|
|
||||||
pb.NewClusterClient(c.ActiveConnection()),
|
|
||||||
pb.NewKVClient(c.ActiveConnection()),
|
|
||||||
pb.NewLeaseClient(c.ActiveConnection()),
|
|
||||||
pb.NewWatchClient(c.ActiveConnection()),
|
|
||||||
pb.NewMaintenanceClient(c.ActiveConnection()),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
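Editor's note: the hunks above add fault-injection helpers (InjectPartition/RecoverPartition) and a leader-transition poller to the integration harness. A minimal sketch of how a test might combine them — the NewClusterV3/ClusterConfig harness calls and the assumption that member 0 currently leads are illustrative, not part of this diff:

package integration

import "testing"

// TestLeaderMovesOnPartition is a sketch, not part of the commit.
func TestLeaderMovesOnPartition(t *testing.T) {
    clus := NewClusterV3(t, &ClusterConfig{Size: 3}) // assumed harness constructor
    defer clus.Terminate(t)

    lead := clus.Members[0].s.Lead()
    // cut the presumed leader's peer links in both directions
    clus.Members[0].InjectPartition(t, clus.Members[1:])
    // poll until another member reports a different leader
    if newLead := checkLeaderTransition(t, clus.Members[1], lead); newLead == lead {
        t.Fatalf("leader did not change")
    }
    clus.Members[0].RecoverPartition(t, clus.Members[1:])
}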
@@ -0,0 +1,37 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !cluster_proxy
+
+package integration
+
+import (
+    "github.com/coreos/etcd/clientv3"
+    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+func toGRPC(c *clientv3.Client) grpcAPI {
+    return grpcAPI{
+        pb.NewClusterClient(c.ActiveConnection()),
+        pb.NewKVClient(c.ActiveConnection()),
+        pb.NewLeaseClient(c.ActiveConnection()),
+        pb.NewWatchClient(c.ActiveConnection()),
+        pb.NewMaintenanceClient(c.ActiveConnection()),
+        pb.NewAuthClient(c.ActiveConnection()),
+    }
+}
+
+func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
+    return clientv3.New(cfg)
+}
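Editor's note: this new file carries the `!cluster_proxy` build constraint, so it is one of a pair — the next hunk provides the same toGRPC/newClientV3 symbols behind the `cluster_proxy` tag, and exactly one variant is compiled into any given test binary. Assuming the usual invocation for this package (the package path is an assumption):

go test ./integration                      # default build: cluster_direct.go
go test -tags cluster_proxy ./integration  # proxy build: cluster_proxy.go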
@@ -0,0 +1,89 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cluster_proxy
+
+package integration
+
+import (
+    "sync"
+
+    "github.com/coreos/etcd/clientv3"
+    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+    "github.com/coreos/etcd/proxy/grpcproxy"
+)
+
+var (
+    pmu     sync.Mutex
+    proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
+)
+
+type grpcClientProxy struct {
+    grpc    grpcAPI
+    wdonec  <-chan struct{}
+    kvdonec <-chan struct{}
+}
+
+func toGRPC(c *clientv3.Client) grpcAPI {
+    pmu.Lock()
+    defer pmu.Unlock()
+
+    if v, ok := proxies[c]; ok {
+        return v.grpc
+    }
+
+    wp, wpch := grpcproxy.NewWatchProxy(c)
+    kvp, kvpch := grpcproxy.NewKvProxy(c)
+    grpc := grpcAPI{
+        pb.NewClusterClient(c.ActiveConnection()),
+        grpcproxy.KvServerToKvClient(kvp),
+        pb.NewLeaseClient(c.ActiveConnection()),
+        grpcproxy.WatchServerToWatchClient(wp),
+        pb.NewMaintenanceClient(c.ActiveConnection()),
+        pb.NewAuthClient(c.ActiveConnection()),
+    }
+    proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch}
+    return grpc
+}
+
+type proxyCloser struct {
+    clientv3.Watcher
+    wdonec  <-chan struct{}
+    kvdonec <-chan struct{}
+}
+
+func (pc *proxyCloser) Close() error {
+    // client ctx is canceled before calling close, so kv will close out
+    <-pc.kvdonec
+    err := pc.Watcher.Close()
+    <-pc.wdonec
+    return err
+}
+
+func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
+    c, err := clientv3.New(cfg)
+    if err != nil {
+        return nil, err
+    }
+    rpc := toGRPC(c)
+    c.KV = clientv3.NewKVFromKVClient(rpc.KV)
+    pmu.Lock()
+    c.Watcher = &proxyCloser{
+        Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch),
+        wdonec:  proxies[c].wdonec,
+        kvdonec: proxies[c].kvdonec,
+    }
+    pmu.Unlock()
+    return c, nil
+}
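Editor's note: proxyCloser encodes a shutdown order — the kv proxy loop drains first (the client context is canceled before Close is called), then the watcher closes, then the watch proxy loop drains. The same pattern in isolation, with illustrative names:

package proxyutil

import "io"

// closer reproduces the proxyCloser ordering above with generic names.
type closer struct {
    inner   io.Closer
    kvdonec <-chan struct{}
    wdonec  <-chan struct{}
}

func (c *closer) Close() error {
    <-c.kvdonec // kv loop has already drained (its context was canceled first)
    err := c.inner.Close()
    <-c.wdonec // watch loop exits once the wrapped watcher is closed
    return err
}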
@@ -16,21 +16,35 @@ package leasehttp
 
 import (
     "bytes"
+    "errors"
     "fmt"
     "io/ioutil"
     "net/http"
+    "time"
 
     pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
     "github.com/coreos/etcd/lease"
+    "github.com/coreos/etcd/lease/leasepb"
+    "github.com/coreos/etcd/pkg/httputil"
     "golang.org/x/net/context"
 )
 
+var (
+    LeasePrefix         = "/leases"
+    LeaseInternalPrefix = "/leases/internal"
+    applyTimeout        = time.Second
+    ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
+)
+
 // NewHandler returns an http Handler for lease renewals
-func NewHandler(l lease.Lessor) http.Handler {
-    return &leaseHandler{l}
+func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
+    return &leaseHandler{l, waitch}
 }
 
-type leaseHandler struct{ l lease.Lessor }
+type leaseHandler struct {
+    l      lease.Lessor
+    waitch func() <-chan struct{}
+}
 
 func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     if r.Method != "POST" {

@@ -44,28 +58,81 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    lreq := pb.LeaseKeepAliveRequest{}
-    if err := lreq.Unmarshal(b); err != nil {
-        http.Error(w, "error unmarshalling request", http.StatusBadRequest)
-        return
-    }
-
-    ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
-    if err != nil {
-        if err == lease.ErrLeaseNotFound {
-            http.Error(w, err.Error(), http.StatusNotFound)
+    var v []byte
+    switch r.URL.Path {
+    case LeasePrefix:
+        lreq := pb.LeaseKeepAliveRequest{}
+        if err := lreq.Unmarshal(b); err != nil {
+            http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+            return
+        }
+        select {
+        case <-h.waitch():
+        case <-time.After(applyTimeout):
+            http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+            return
+        }
+        ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
+        if err != nil {
+            if err == lease.ErrLeaseNotFound {
+                http.Error(w, err.Error(), http.StatusNotFound)
+                return
+            }
+
+            http.Error(w, err.Error(), http.StatusBadRequest)
+            return
+        }
+        // TODO: fill out ResponseHeader
+        resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
+        v, err = resp.Marshal()
+        if err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
             return
         }
 
-        http.Error(w, err.Error(), http.StatusBadRequest)
-        return
-    }
-
-    // TODO: fill out ResponseHeader
-    resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
-    v, err := resp.Marshal()
-    if err != nil {
-        http.Error(w, err.Error(), http.StatusInternalServerError)
+    case LeaseInternalPrefix:
+        lreq := leasepb.LeaseInternalRequest{}
+        if err := lreq.Unmarshal(b); err != nil {
+            http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+            return
+        }
+        select {
+        case <-h.waitch():
+        case <-time.After(applyTimeout):
+            http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+            return
+        }
+        l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
+        if l == nil {
+            http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
+            return
+        }
+        // TODO: fill out ResponseHeader
+        resp := &leasepb.LeaseInternalResponse{
+            LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
+                Header:     &pb.ResponseHeader{},
+                ID:         lreq.LeaseTimeToLiveRequest.ID,
+                TTL:        int64(l.Remaining().Seconds()),
+                GrantedTTL: l.TTL(),
+            },
+        }
+        if lreq.LeaseTimeToLiveRequest.Keys {
+            ks := l.Keys()
+            kbs := make([][]byte, len(ks))
+            for i := range ks {
+                kbs[i] = []byte(ks[i])
+            }
+            resp.LeaseTimeToLiveResponse.Keys = kbs
+        }
+
+        v, err = resp.Marshal()
+        if err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+            return
+        }
+
+    default:
+        http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
         return
     }

@@ -92,15 +159,17 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT
 
     resp, err := cc.Do(req)
     if err != nil {
-        // TODO detect if leader failed and retry?
         return -1, err
     }
-    b, err := ioutil.ReadAll(resp.Body)
-    resp.Body.Close()
+    b, err := readResponse(resp)
     if err != nil {
         return -1, err
     }
 
+    if resp.StatusCode == http.StatusRequestTimeout {
+        return -1, ErrLeaseHTTPTimeout
+    }
+
     if resp.StatusCode == http.StatusNotFound {
         return -1, lease.ErrLeaseNotFound
     }

@@ -118,3 +187,74 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT
     }
     return lresp.TTL, nil
 }
+
+// TimeToLiveHTTP retrieves lease information of the given lease ID.
+func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
+    // will post lreq protobuf to leader
+    lreq, err := (&leasepb.LeaseInternalRequest{&pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: keys}}).Marshal()
+    if err != nil {
+        return nil, err
+    }
+
+    req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+    if err != nil {
+        return nil, err
+    }
+    req.Header.Set("Content-Type", "application/protobuf")
+
+    cancel := httputil.RequestCanceler(req)
+
+    cc := &http.Client{Transport: rt}
+    var b []byte
+    // buffer errc channel so that errc don't block inside the go routinue
+    errc := make(chan error, 2)
+    go func() {
+        resp, err := cc.Do(req)
+        if err != nil {
+            errc <- err
+            return
+        }
+        b, err = readResponse(resp)
+        if err != nil {
+            errc <- err
+            return
+        }
+        if resp.StatusCode == http.StatusRequestTimeout {
+            errc <- ErrLeaseHTTPTimeout
+            return
+        }
+        if resp.StatusCode == http.StatusNotFound {
+            errc <- lease.ErrLeaseNotFound
+            return
+        }
+        if resp.StatusCode != http.StatusOK {
+            errc <- fmt.Errorf("lease: unknown error(%s)", string(b))
+            return
+        }
+        errc <- nil
+    }()
+    select {
+    case derr := <-errc:
+        if derr != nil {
+            return nil, derr
+        }
+    case <-ctx.Done():
+        cancel()
+        return nil, ctx.Err()
+    }
+
+    lresp := &leasepb.LeaseInternalResponse{}
+    if err := lresp.Unmarshal(b); err != nil {
+        return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+    }
+    if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
+        return nil, fmt.Errorf("lease: renew id mismatch")
+    }
+    return lresp, nil
+}
+
+func readResponse(resp *http.Response) (b []byte, err error) {
+    b, err = ioutil.ReadAll(resp.Body)
+    httputil.GracefulClose(resp)
+    return
+}
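Editor's note: the new TimeToLiveHTTP pairs with the LeaseInternalPrefix route the handler above serves, letting a follower query lease metadata from the leader over plain HTTP. A hypothetical caller-side sketch — the peer URL, lease ID, and transport are assumptions:

package main

import (
    "fmt"
    "net/http"

    "github.com/coreos/etcd/lease"
    "github.com/coreos/etcd/lease/leasehttp"
    "golang.org/x/net/context"
)

// fetchRemainingTTL posts a LeaseInternalRequest to the leader's peer URL.
func fetchRemainingTTL(peerURL string, id int64) (int64, error) {
    resp, err := leasehttp.TimeToLiveHTTP(context.Background(), lease.LeaseID(id),
        false, peerURL+leasehttp.LeaseInternalPrefix, http.DefaultTransport)
    if err != nil {
        // may be ErrLeaseHTTPTimeout (remote applied index lagging) or
        // lease.ErrLeaseNotFound
        return -1, err
    }
    return resp.LeaseTimeToLiveResponse.TTL, nil
}

func main() {
    ttl, err := fetchRemainingTTL("http://127.0.0.1:2380", 1) // URL is illustrative
    fmt.Println(ttl, err)
}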
@@ -10,6 +10,8 @@
 
     It has these top-level messages:
         Lease
+        LeaseInternalRequest
+        LeaseInternalResponse
 */
 package leasepb

@@ -20,6 +22,8 @@ import (
 
     math "math"
 
+    etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
     io "io"
 )

@@ -30,11 +34,13 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 type Lease struct {
-    ID  int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
-    TTL int64 `protobuf:"varint,2,opt,name=TTL,json=tTL,proto3" json:"TTL,omitempty"`
+    ID  int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+    TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
 }
 
 func (m *Lease) Reset() { *m = Lease{} }

@@ -42,62 +48,138 @@ func (m *Lease) String() string { return proto.CompactTextString(m) }
 func (*Lease) ProtoMessage()    {}
 func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{0} }
 
+type LeaseInternalRequest struct {
+    LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest" json:"LeaseTimeToLiveRequest,omitempty"`
+}
+
+func (m *LeaseInternalRequest) Reset()         { *m = LeaseInternalRequest{} }
+func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalRequest) ProtoMessage()    {}
+func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{1} }
+
+type LeaseInternalResponse struct {
+    LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse" json:"LeaseTimeToLiveResponse,omitempty"`
+}
+
+func (m *LeaseInternalResponse) Reset()         { *m = LeaseInternalResponse{} }
+func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalResponse) ProtoMessage()    {}
+func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{2} }
+
 func init() {
     proto.RegisterType((*Lease)(nil), "leasepb.Lease")
+    proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest")
+    proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse")
 }
-func (m *Lease) Marshal() (data []byte, err error) {
+func (m *Lease) Marshal() (dAtA []byte, err error) {
     size := m.Size()
-    data = make([]byte, size)
-    n, err := m.MarshalTo(data)
+    dAtA = make([]byte, size)
+    n, err := m.MarshalTo(dAtA)
     if err != nil {
         return nil, err
     }
-    return data[:n], nil
+    return dAtA[:n], nil
 }
 
-func (m *Lease) MarshalTo(data []byte) (int, error) {
+func (m *Lease) MarshalTo(dAtA []byte) (int, error) {
     var i int
     _ = i
     var l int
     _ = l
     if m.ID != 0 {
-        data[i] = 0x8
+        dAtA[i] = 0x8
         i++
-        i = encodeVarintLease(data, i, uint64(m.ID))
+        i = encodeVarintLease(dAtA, i, uint64(m.ID))
     }
     if m.TTL != 0 {
-        data[i] = 0x10
+        dAtA[i] = 0x10
         i++
-        i = encodeVarintLease(data, i, uint64(m.TTL))
+        i = encodeVarintLease(dAtA, i, uint64(m.TTL))
     }
     return i, nil
 }
 
+func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
+    size := m.Size()
+    dAtA = make([]byte, size)
+    n, err := m.MarshalTo(dAtA)
+    if err != nil {
+        return nil, err
+    }
+    return dAtA[:n], nil
+}
+
+func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) {
+    var i int
+    _ = i
+    var l int
+    _ = l
+    if m.LeaseTimeToLiveRequest != nil {
+        dAtA[i] = 0xa
+        i++
+        i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveRequest.Size()))
+        n1, err := m.LeaseTimeToLiveRequest.MarshalTo(dAtA[i:])
+        if err != nil {
+            return 0, err
+        }
+        i += n1
+    }
+    return i, nil
+}
+
+func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
+    size := m.Size()
+    dAtA = make([]byte, size)
+    n, err := m.MarshalTo(dAtA)
+    if err != nil {
+        return nil, err
+    }
+    return dAtA[:n], nil
+}
+
+func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) {
+    var i int
+    _ = i
+    var l int
+    _ = l
+    if m.LeaseTimeToLiveResponse != nil {
+        dAtA[i] = 0xa
+        i++
+        i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveResponse.Size()))
+        n2, err := m.LeaseTimeToLiveResponse.MarshalTo(dAtA[i:])
+        if err != nil {
+            return 0, err
+        }
+        i += n2
+    }
+    return i, nil
+}
+
-func encodeFixed64Lease(data []byte, offset int, v uint64) int {
-    data[offset] = uint8(v)
-    data[offset+1] = uint8(v >> 8)
-    data[offset+2] = uint8(v >> 16)
-    data[offset+3] = uint8(v >> 24)
-    data[offset+4] = uint8(v >> 32)
-    data[offset+5] = uint8(v >> 40)
-    data[offset+6] = uint8(v >> 48)
-    data[offset+7] = uint8(v >> 56)
+func encodeFixed64Lease(dAtA []byte, offset int, v uint64) int {
+    dAtA[offset] = uint8(v)
+    dAtA[offset+1] = uint8(v >> 8)
+    dAtA[offset+2] = uint8(v >> 16)
+    dAtA[offset+3] = uint8(v >> 24)
+    dAtA[offset+4] = uint8(v >> 32)
+    dAtA[offset+5] = uint8(v >> 40)
+    dAtA[offset+6] = uint8(v >> 48)
+    dAtA[offset+7] = uint8(v >> 56)
     return offset + 8
 }
-func encodeFixed32Lease(data []byte, offset int, v uint32) int {
-    data[offset] = uint8(v)
-    data[offset+1] = uint8(v >> 8)
-    data[offset+2] = uint8(v >> 16)
-    data[offset+3] = uint8(v >> 24)
+func encodeFixed32Lease(dAtA []byte, offset int, v uint32) int {
+    dAtA[offset] = uint8(v)
+    dAtA[offset+1] = uint8(v >> 8)
+    dAtA[offset+2] = uint8(v >> 16)
+    dAtA[offset+3] = uint8(v >> 24)
    return offset + 4
 }
-func encodeVarintLease(data []byte, offset int, v uint64) int {
+func encodeVarintLease(dAtA []byte, offset int, v uint64) int {
     for v >= 1<<7 {
-        data[offset] = uint8(v&0x7f | 0x80)
+        dAtA[offset] = uint8(v&0x7f | 0x80)
         v >>= 7
         offset++
     }
-    data[offset] = uint8(v)
+    dAtA[offset] = uint8(v)
     return offset + 1
 }
 func (m *Lease) Size() (n int) {
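Editor's note: encodeVarintLease above is plain protocol-buffers varint encoding, and the literal key bytes MarshalTo writes follow the (field_number << 3) | wire_type rule — 0x8 is field 1/varint, 0x10 is field 2/varint, 0xa is field 1/length-delimited. The varint loop in isolation:

package varint

// encodeVarint mirrors encodeVarintLease: little-endian base-128 with a
// continuation bit on every byte except the last.
func encodeVarint(buf []byte, offset int, v uint64) int {
    for v >= 1<<7 {
        buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits, MSB = "more follows"
        v >>= 7
        offset++
    }
    buf[offset] = uint8(v) // final byte, MSB clear
    return offset + 1
}

// Example: 300 = 0b1_0010_1100 encodes to the two bytes [0xac, 0x02].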
@@ -112,6 +194,26 @@ func (m *Lease) Size() (n int) {
     return n
 }
 
+func (m *LeaseInternalRequest) Size() (n int) {
+    var l int
+    _ = l
+    if m.LeaseTimeToLiveRequest != nil {
+        l = m.LeaseTimeToLiveRequest.Size()
+        n += 1 + l + sovLease(uint64(l))
+    }
+    return n
+}
+
+func (m *LeaseInternalResponse) Size() (n int) {
+    var l int
+    _ = l
+    if m.LeaseTimeToLiveResponse != nil {
+        l = m.LeaseTimeToLiveResponse.Size()
+        n += 1 + l + sovLease(uint64(l))
+    }
+    return n
+}
+
 func sovLease(x uint64) (n int) {
     for {
         n++

@@ -125,8 +227,8 @@ func sovLease(x uint64) (n int) {
 func sozLease(x uint64) (n int) {
     return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
-func (m *Lease) Unmarshal(data []byte) error {
-    l := len(data)
+func (m *Lease) Unmarshal(dAtA []byte) error {
+    l := len(dAtA)
     iNdEx := 0
     for iNdEx < l {
         preIndex := iNdEx

@@ -138,7 +240,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
             if iNdEx >= l {
                 return io.ErrUnexpectedEOF
             }
-            b := data[iNdEx]
+            b := dAtA[iNdEx]
             iNdEx++
             wire |= (uint64(b) & 0x7F) << shift
             if b < 0x80 {

@@ -166,7 +268,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
             if iNdEx >= l {
                 return io.ErrUnexpectedEOF
             }
-            b := data[iNdEx]
+            b := dAtA[iNdEx]
             iNdEx++
             m.ID |= (int64(b) & 0x7F) << shift
             if b < 0x80 {

@@ -185,7 +287,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
             if iNdEx >= l {
                 return io.ErrUnexpectedEOF
             }
-            b := data[iNdEx]
+            b := dAtA[iNdEx]
             iNdEx++
             m.TTL |= (int64(b) & 0x7F) << shift
             if b < 0x80 {

@@ -194,7 +296,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
             }
         default:
             iNdEx = preIndex
-            skippy, err := skipLease(data[iNdEx:])
+            skippy, err := skipLease(dAtA[iNdEx:])
             if err != nil {
                 return err
             }

@@ -213,8 +315,174 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
     }
     return nil
 }
-func skipLease(data []byte) (n int, err error) {
-    l := len(data)
+func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
+    l := len(dAtA)
+    iNdEx := 0
+    for iNdEx < l {
+        preIndex := iNdEx
+        var wire uint64
+        for shift := uint(0); ; shift += 7 {
+            if shift >= 64 {
+                return ErrIntOverflowLease
+            }
+            if iNdEx >= l {
+                return io.ErrUnexpectedEOF
+            }
+            b := dAtA[iNdEx]
+            iNdEx++
+            wire |= (uint64(b) & 0x7F) << shift
+            if b < 0x80 {
+                break
+            }
+        }
+        fieldNum := int32(wire >> 3)
+        wireType := int(wire & 0x7)
+        if wireType == 4 {
+            return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group")
+        }
+        if fieldNum <= 0 {
+            return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+        }
+        switch fieldNum {
+        case 1:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType)
+            }
+            var msglen int
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowLease
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                msglen |= (int(b) & 0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            if msglen < 0 {
+                return ErrInvalidLengthLease
+            }
+            postIndex := iNdEx + msglen
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            if m.LeaseTimeToLiveRequest == nil {
+                m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{}
+            }
+            if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+                return err
+            }
+            iNdEx = postIndex
+        default:
+            iNdEx = preIndex
+            skippy, err := skipLease(dAtA[iNdEx:])
+            if err != nil {
+                return err
+            }
+            if skippy < 0 {
+                return ErrInvalidLengthLease
+            }
+            if (iNdEx + skippy) > l {
+                return io.ErrUnexpectedEOF
+            }
+            iNdEx += skippy
+        }
+    }
+
+    if iNdEx > l {
+        return io.ErrUnexpectedEOF
+    }
+    return nil
+}
+func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
+    l := len(dAtA)
+    iNdEx := 0
+    for iNdEx < l {
+        preIndex := iNdEx
+        var wire uint64
+        for shift := uint(0); ; shift += 7 {
+            if shift >= 64 {
+                return ErrIntOverflowLease
+            }
+            if iNdEx >= l {
+                return io.ErrUnexpectedEOF
+            }
+            b := dAtA[iNdEx]
+            iNdEx++
+            wire |= (uint64(b) & 0x7F) << shift
+            if b < 0x80 {
+                break
+            }
+        }
+        fieldNum := int32(wire >> 3)
+        wireType := int(wire & 0x7)
+        if wireType == 4 {
+            return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group")
+        }
+        if fieldNum <= 0 {
+            return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+        }
+        switch fieldNum {
+        case 1:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType)
+            }
+            var msglen int
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowLease
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                msglen |= (int(b) & 0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            if msglen < 0 {
+                return ErrInvalidLengthLease
+            }
+            postIndex := iNdEx + msglen
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            if m.LeaseTimeToLiveResponse == nil {
+                m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{}
+            }
+            if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+                return err
+            }
+            iNdEx = postIndex
+        default:
+            iNdEx = preIndex
+            skippy, err := skipLease(dAtA[iNdEx:])
+            if err != nil {
+                return err
+            }
+            if skippy < 0 {
+                return ErrInvalidLengthLease
+            }
+            if (iNdEx + skippy) > l {
+                return io.ErrUnexpectedEOF
+            }
+            iNdEx += skippy
+        }
+    }
+
+    if iNdEx > l {
+        return io.ErrUnexpectedEOF
+    }
+    return nil
+}
+func skipLease(dAtA []byte) (n int, err error) {
+    l := len(dAtA)
     iNdEx := 0
     for iNdEx < l {
         var wire uint64

@@ -225,7 +493,7 @@ func skipLease(dAtA []byte) (n int, err error) {
             if iNdEx >= l {
                 return 0, io.ErrUnexpectedEOF
             }
-            b := data[iNdEx]
+            b := dAtA[iNdEx]
             iNdEx++
             wire |= (uint64(b) & 0x7F) << shift
             if b < 0x80 {

@@ -243,7 +511,7 @@ func skipLease(dAtA []byte) (n int, err error) {
                 return 0, io.ErrUnexpectedEOF
             }
             iNdEx++
-            if data[iNdEx-1] < 0x80 {
+            if dAtA[iNdEx-1] < 0x80 {
                 break
             }
         }

@@ -260,7 +528,7 @@ func skipLease(dAtA []byte) (n int, err error) {
             if iNdEx >= l {
                 return 0, io.ErrUnexpectedEOF
             }
-            b := data[iNdEx]
+            b := dAtA[iNdEx]
             iNdEx++
             length |= (int(b) & 0x7F) << shift
             if b < 0x80 {

@@ -283,7 +551,7 @@ func skipLease(dAtA []byte) (n int, err error) {
             if iNdEx >= l {
                 return 0, io.ErrUnexpectedEOF
             }
-            b := data[iNdEx]
+            b := dAtA[iNdEx]
             iNdEx++
             innerWire |= (uint64(b) & 0x7F) << shift
             if b < 0x80 {

@@ -294,7 +562,7 @@ func skipLease(dAtA []byte) (n int, err error) {
             if innerWireType == 4 {
                 break
             }
-            next, err := skipLease(data[start:])
+            next, err := skipLease(dAtA[start:])
             if err != nil {
                 return 0, err
             }

@@ -318,14 +586,23 @@ var (
     ErrIntOverflowLease = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) }
+
 var fileDescriptorLease = []byte{
-    // 126 bytes of a gzipped FileDescriptorProto
+    // 233 bytes of a gzipped FileDescriptorProto
     0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c,
     0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2,
-    0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x49, 0x93, 0x8b, 0xd5, 0x07, 0xa4,
-    0x40, 0x88, 0x8f, 0x8b, 0xc9, 0xd3, 0x45, 0x82, 0x51, 0x81, 0x51, 0x83, 0x39, 0x88, 0x29, 0xd3,
-    0x45, 0x48, 0x80, 0x8b, 0x39, 0x24, 0xc4, 0x47, 0x82, 0x09, 0x2c, 0xc0, 0x5c, 0x12, 0xe2, 0xe3,
-    0x24, 0x71, 0xe2, 0xa1, 0x1c, 0xc3, 0x85, 0x87, 0x72, 0x0c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
-    0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0xb3, 0x8c,
-    0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xa0, 0x42, 0x1a, 0x79, 0x00, 0x00, 0x00,
+    0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45,
+    0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92,
+    0x21, 0xea, 0x94, 0x34, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48,
+    0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8,
+    0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0xa5, 0x12, 0x2e, 0x11, 0xb0, 0x52, 0xcf, 0xbc, 0x92, 0xd4,
+    0xa2, 0xbc, 0xc4, 0x9c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0xa1, 0x18, 0x2e, 0x31, 0xb0,
+    0x78, 0x48, 0x66, 0x6e, 0x6a, 0x48, 0xbe, 0x4f, 0x66, 0x59, 0x2a, 0x54, 0x06, 0x6c, 0x1a, 0xb7,
+    0x91, 0x8a, 0x1e, 0xb2, 0xdd, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43, 0xa9, 0x82, 0x4b, 0x14,
+    0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71, 0x0c, 0x2d, 0x10, 0x29,
+    0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 0x8a, 0x93, 0xc4, 0x89, 0x87, 0x72,
+    0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47,
+    0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff,
+    0xff, 0x9f, 0xf2, 0x42, 0xe0, 0x91, 0x01, 0x00, 0x00,
 }
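Editor's note: on the decode side, every generated Unmarshal above starts by reading a varint key and splitting it the opposite way from MarshalTo. A three-line illustration:

package wire

// splitKey mirrors the first step of the generated Unmarshal functions.
func splitKey(wire uint64) (fieldNum int32, wireType int) {
    return int32(wire >> 3), int(wire & 0x7)
}

// splitKey(0x0a) == (1, 2): field 1, length-delimited — the key MarshalTo
// writes before an embedded LeaseTimeToLiveRequest message.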
@@ -2,6 +2,7 @@ syntax = "proto3";
 package leasepb;
 
 import "gogoproto/gogo.proto";
+import "etcd/etcdserver/etcdserverpb/rpc.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.sizer_all) = true;

@@ -13,3 +14,11 @@ message Lease {
   int64 ID = 1;
   int64 TTL = 2;
 }
+
+message LeaseInternalRequest {
+  etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1;
+}
+
+message LeaseInternalResponse {
+  etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1;
+}
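Editor's note: these two wrapper messages exist so a follower can forward a LeaseTimeToLive query to the leader over plain HTTP without the gRPC layer. A round-trip sketch using the generated code from the hunks above:

package main

import (
    "fmt"

    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "github.com/coreos/etcd/lease/leasepb"
)

func main() {
    // wrap a LeaseTimeToLiveRequest and round-trip it through Marshal/Unmarshal
    req := &leasepb.LeaseInternalRequest{
        LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{ID: 1, Keys: true},
    }
    b, err := req.Marshal() // wire form: key 0x0a, varint length, embedded message
    if err != nil {
        panic(err)
    }
    var out leasepb.LeaseInternalRequest
    if err := out.Unmarshal(b); err != nil {
        panic(err)
    }
    fmt.Println(out.LeaseTimeToLiveRequest.ID) // 1
}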
@@ -18,11 +18,14 @@ import (
     "encoding/binary"
     "errors"
     "math"
+    "sort"
     "sync"
+    "sync/atomic"
     "time"
 
     "github.com/coreos/etcd/lease/leasepb"
     "github.com/coreos/etcd/mvcc/backend"
+    "github.com/coreos/etcd/pkg/monotime"
 )
 
 const (

@@ -32,9 +35,8 @@ const (
 
 var (
     leaseBucketName = []byte("lease")
-    // do not use maxInt64 since it can overflow time which will add
-    // the offset of unix time (1970yr to seconds).
-    forever = time.Unix(math.MaxInt64>>1, 0)
+
+    forever = monotime.Time(math.MaxInt64)
 
     ErrNotPrimary    = errors.New("not a primary lessor")
     ErrLeaseNotFound = errors.New("lease not found")

@@ -75,6 +77,10 @@ type Lessor interface {
     // If the lease does not exist, an error will be returned.
     Attach(id LeaseID, items []LeaseItem) error
 
+    // GetLease returns LeaseID for given item.
+    // If no lease found, NoLease value will be returned.
+    GetLease(item LeaseItem) LeaseID
+
     // Detach detaches given leaseItem from the lease with given LeaseID.
     // If the lease does not exist, an error will be returned.
     Detach(id LeaseID, items []LeaseItem) error

@@ -110,20 +116,9 @@ type Lessor interface {
 type lessor struct {
     mu sync.Mutex
 
-    // primary indicates if this lessor is the primary lessor. The primary
-    // lessor manages lease expiration and renew.
-    //
-    // in etcd, raft leader is the primary. Thus there might be two primary
-    // leaders at the same time (raft allows concurrent leader but with different term)
-    // for at most a leader election timeout.
-    // The old primary leader cannot affect the correctness since its proposal has a
-    // smaller term and will not be committed.
-    //
-    // TODO: raft follower do not forward lease management proposals. There might be a
-    // very small window (within second normally which depends on go scheduling) that
-    // a raft follow is the primary between the raft leader demotion and lessor demotion.
-    // Usually this should not be a problem. Lease should not be that sensitive to timing.
-    primary bool
+    // demotec is set when the lessor is the primary.
+    // demotec will be closed if the lessor is demoted.
+    demotec chan struct{}
 
     // TODO: probably this should be a heap with a secondary
     // id index.

@@ -133,6 +128,8 @@ type lessor struct {
     // findExpiredLeases and Renew should be the most frequent operations.
     leaseMap map[LeaseID]*Lease
 
+    itemMap map[LeaseItem]LeaseID
+
     // When a lease expires, the lessor will delete the
     // leased range (or key) by the RangeDeleter.
     rd RangeDeleter

@@ -159,6 +156,7 @@ func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor {
 func newLessor(b backend.Backend, minLeaseTTL int64) *lessor {
     l := &lessor{
         leaseMap:    make(map[LeaseID]*Lease),
+        itemMap:     make(map[LeaseItem]LeaseID),
         b:           b,
         minLeaseTTL: minLeaseTTL,
         // expiredC is a small buffered chan to avoid unnecessary blocking.

@@ -173,6 +171,23 @@ func newLessor(b backend.Backend, minLeaseTTL int64) *lessor {
     return l
 }
 
+// isPrimary indicates if this lessor is the primary lessor. The primary
+// lessor manages lease expiration and renew.
+//
+// in etcd, raft leader is the primary. Thus there might be two primary
+// leaders at the same time (raft allows concurrent leader but with different term)
+// for at most a leader election timeout.
+// The old primary leader cannot affect the correctness since its proposal has a
+// smaller term and will not be committed.
+//
+// TODO: raft follower do not forward lease management proposals. There might be a
+// very small window (within second normally which depends on go scheduling) that
+// a raft follow is the primary between the raft leader demotion and lessor demotion.
+// Usually this should not be a problem. Lease should not be that sensitive to timing.
+func (le *lessor) isPrimary() bool {
+    return le.demotec != nil
+}
+
 func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
     le.mu.Lock()
     defer le.mu.Unlock()
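Editor's note: the demotec field replaces the old primary bool — a nil channel means "not primary", and closing it broadcasts demotion to every goroutine blocked in a select (the expired-lease wait in Renew below relies on this). The idiom in isolation, with illustrative names:

package primary

import "sync"

// node reduces the demotec pattern: the channel both marks primacy and,
// when closed, wakes all waiters at once.
type node struct {
    mu      sync.Mutex
    demotec chan struct{}
}

func (n *node) promote() {
    n.mu.Lock()
    n.demotec = make(chan struct{})
    n.mu.Unlock()
}

func (n *node) demote() {
    n.mu.Lock()
    if n.demotec != nil {
        close(n.demotec) // broadcast: every select on the old channel unblocks
        n.demotec = nil
    }
    n.mu.Unlock()
}

func (n *node) isPrimary() bool {
    n.mu.Lock()
    defer n.mu.Unlock()
    return n.demotec != nil
}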
@@ -187,7 +202,12 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
 
     // TODO: when lessor is under high load, it should give out lease
     // with longer TTL to reduce renew load.
-    l := &Lease{ID: id, TTL: ttl, itemSet: make(map[LeaseItem]struct{})}
+    l := &Lease{
+        ID:      id,
+        ttl:     ttl,
+        itemSet: make(map[LeaseItem]struct{}),
+        revokec: make(chan struct{}),
+    }
 
     le.mu.Lock()
     defer le.mu.Unlock()

@@ -196,11 +216,11 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
         return nil, ErrLeaseExists
     }
 
-    if l.TTL < le.minLeaseTTL {
-        l.TTL = le.minLeaseTTL
+    if l.ttl < le.minLeaseTTL {
+        l.ttl = le.minLeaseTTL
     }
 
-    if le.primary {
+    if le.isPrimary() {
         l.refresh(0)
     } else {
         l.forever()

@@ -220,6 +240,7 @@ func (le *lessor) Revoke(id LeaseID) error {
         le.mu.Unlock()
         return ErrLeaseNotFound
     }
+    defer close(l.revokec)
     // unlock before doing external work
     le.mu.Unlock()
 

@@ -228,8 +249,13 @@ func (le *lessor) Revoke(id LeaseID) error {
     }
 
     tid := le.rd.TxnBegin()
-    for item := range l.itemSet {
-        _, _, err := le.rd.TxnDeleteRange(tid, []byte(item.Key), nil)
+
+    // sort keys so deletes are in same order among all members,
+    // otherwise the backened hashes will be different
+    keys := l.Keys()
+    sort.StringSlice(keys).Sort()
+    for _, key := range keys {
+        _, _, err := le.rd.TxnDeleteRange(tid, []byte(key), nil)
         if err != nil {
             panic(err)
         }

@@ -255,36 +281,55 @@ func (le *lessor) Revoke(id LeaseID) error {
 // has expired, an error will be returned.
 func (le *lessor) Renew(id LeaseID) (int64, error) {
     le.mu.Lock()
-    defer le.mu.Unlock()
 
-    if !le.primary {
+    unlock := func() { le.mu.Unlock() }
+    defer func() { unlock() }()
+
+    if !le.isPrimary() {
         // forward renew request to primary instead of returning error.
         return -1, ErrNotPrimary
     }
+
+    demotec := le.demotec
+
     l := le.leaseMap[id]
     if l == nil {
         return -1, ErrLeaseNotFound
     }
+
+    if l.expired() {
+        le.mu.Unlock()
+        unlock = func() {}
+        select {
+        // A expired lease might be pending for revoking or going through
+        // quorum to be revoked. To be accurate, renew request must wait for the
+        // deletion to complete.
+        case <-l.revokec:
+            return -1, ErrLeaseNotFound
+        // The expired lease might fail to be revoked if the primary changes.
+        // The caller will retry on ErrNotPrimary.
+        case <-demotec:
+            return -1, ErrNotPrimary
+        case <-le.stopC:
+            return -1, ErrNotPrimary
+        }
+    }
+
     l.refresh(0)
-    return l.TTL, nil
+    return l.ttl, nil
 }
 
 func (le *lessor) Lookup(id LeaseID) *Lease {
     le.mu.Lock()
     defer le.mu.Unlock()
-    if l, ok := le.leaseMap[id]; ok {
-        return l
-    }
-    return nil
+    return le.leaseMap[id]
 }
 
 func (le *lessor) Promote(extend time.Duration) {
     le.mu.Lock()
     defer le.mu.Unlock()
 
-    le.primary = true
+    le.demotec = make(chan struct{})
 
     // refresh the expiries of all leases.
     for _, l := range le.leaseMap {
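Editor's note: the sort added to Revoke is load-bearing — Go randomizes map iteration order, so without it each member could delete a lease's keys in a different order, and (per the comment in the diff) the backend hashes would diverge. Reduced to its essentials, with deleteKey as a hypothetical stand-in for TxnDeleteRange:

package revoke

import "sort"

// deleteInOrder imposes one total order on the attached keys before deleting,
// so every replica issues the identical delete sequence.
func deleteInOrder(itemSet map[string]struct{}, deleteKey func(k string)) {
    keys := make([]string, 0, len(itemSet))
    for k := range itemSet {
        keys = append(keys, k) // map iteration order is randomized in Go
    }
    sort.StringSlice(keys).Sort()
    for _, k := range keys {
        deleteKey(k)
    }
}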
@@ -301,7 +346,10 @@ func (le *lessor) Demote() {
         l.forever()
     }
 
-    le.primary = false
+    if le.demotec != nil {
+        close(le.demotec)
+        le.demotec = nil
+    }
 }
 
 // Attach attaches items to the lease with given ID. When the lease

@@ -316,12 +364,22 @@ func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
         return ErrLeaseNotFound
     }
 
+    l.mu.Lock()
     for _, it := range items {
         l.itemSet[it] = struct{}{}
+        le.itemMap[it] = id
     }
+    l.mu.Unlock()
     return nil
 }
 
+func (le *lessor) GetLease(item LeaseItem) LeaseID {
+    le.mu.Lock()
+    id := le.itemMap[item]
+    le.mu.Unlock()
+    return id
+}
+
 // Detach detaches items from the lease with given ID.
 // If the given lease does not exist, an error will be returned.
 func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {

@@ -333,9 +391,12 @@ func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
         return ErrLeaseNotFound
     }
 
+    l.mu.Lock()
     for _, it := range items {
         delete(l.itemSet, it)
+        delete(le.itemMap, it)
     }
+    l.mu.Unlock()
     return nil
 }
 

@@ -346,7 +407,7 @@ func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
     le.b = b
     le.rd = rd
     le.leaseMap = make(map[LeaseID]*Lease)
+    le.itemMap = make(map[LeaseItem]LeaseID)
     le.initAndRecover()
 }

@@ -366,7 +427,7 @@ func (le *lessor) runLoop() {
         var ls []*Lease
 
         le.mu.Lock()
-        if le.primary {
+        if le.isPrimary() {
             ls = le.findExpiredLeases()
         }
         le.mu.Unlock()

@@ -395,12 +456,11 @@ func (le *lessor) runLoop() {
 // leases that needed to be revoked.
 func (le *lessor) findExpiredLeases() []*Lease {
     leases := make([]*Lease, 0, 16)
-    now := time.Now()
 
     for _, l := range le.leaseMap {
         // TODO: probably should change to <= 100-500 millisecond to
         // make up committing latency.
-        if l.expiry.Sub(now) <= 0 {
+        if l.expired() {
             leases = append(leases, l)
         }
     }

@@ -408,15 +468,6 @@ func (le *lessor) findExpiredLeases() []*Lease {
     return leases
 }
 
-// get gets the lease with given id.
-// get is a helper function for testing, at least for now.
-func (le *lessor) get(id LeaseID) *Lease {
-    le.mu.Lock()
-    defer le.mu.Unlock()
-
-    return le.leaseMap[id]
-}
-
 func (le *lessor) initAndRecover() {
     tx := le.b.BatchTx()
     tx.Lock()
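Editor's note: itemMap is a reverse index kept in lock-step with each lease's itemSet by Attach/Detach, so GetLease can answer "which lease owns this item?" in O(1) instead of scanning every lease. A usage sketch inside the package (not part of the diff; lessor construction elided):

package lease

// example shows the Attach/GetLease pairing; error handling omitted.
func example(le *lessor) LeaseID {
    l, _ := le.Grant(1, 60)
    le.Attach(l.ID, []LeaseItem{{Key: "foo"}})
    return le.GetLease(LeaseItem{Key: "foo"}) // 1; the zero value if unattached
}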
@ -437,11 +488,12 @@ func (le *lessor) initAndRecover() {
|
||||||
}
|
}
|
||||||
le.leaseMap[ID] = &Lease{
|
le.leaseMap[ID] = &Lease{
|
||||||
ID: ID,
|
ID: ID,
|
||||||
TTL: lpb.TTL,
|
ttl: lpb.TTL,
|
||||||
// itemSet will be filled in when recover key-value pairs
|
// itemSet will be filled in when recover key-value pairs
|
||||||
// set expiry to forever, refresh when promoted
|
// set expiry to forever, refresh when promoted
|
||||||
itemSet: make(map[LeaseItem]struct{}),
|
itemSet: make(map[LeaseItem]struct{}),
|
||||||
expiry: forever,
|
expiry: forever,
|
||||||
|
revokec: make(chan struct{}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tx.Unlock()
|
tx.Unlock()
|
||||||
|
|
@ -451,17 +503,24 @@ func (le *lessor) initAndRecover() {
|
||||||
|
|
||||||
type Lease struct {
|
type Lease struct {
|
||||||
ID LeaseID
|
ID LeaseID
|
||||||
TTL int64 // time to live in seconds
|
ttl int64 // time to live in seconds
|
||||||
|
// expiry is time when lease should expire; must be 64-bit aligned.
|
||||||
|
expiry monotime.Time
|
||||||
|
|
||||||
|
// mu protects concurrent accesses to itemSet
|
||||||
|
mu sync.RWMutex
|
||||||
itemSet map[LeaseItem]struct{}
|
itemSet map[LeaseItem]struct{}
|
||||||
// expiry time in unixnano
|
revokec chan struct{}
|
||||||
expiry time.Time
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l Lease) persistTo(b backend.Backend) {
|
func (l *Lease) expired() bool {
|
||||||
|
return l.Remaining() <= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Lease) persistTo(b backend.Backend) {
|
||||||
key := int64ToBytes(int64(l.ID))
|
key := int64ToBytes(int64(l.ID))
|
||||||
|
|
||||||
lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.TTL)}
|
lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)}
|
||||||
val, err := lpb.Marshal()
|
val, err := lpb.Marshal()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic("failed to marshal lease proto item")
|
panic("failed to marshal lease proto item")
|
||||||
|
|
@@ -472,13 +531,36 @@ func (l Lease) persistTo(b backend.Backend) {
 	b.BatchTx().Unlock()
 }
 
+// TTL returns the TTL of the Lease.
+func (l *Lease) TTL() int64 {
+	return l.ttl
+}
+
 // refresh refreshes the expiry of the lease.
 func (l *Lease) refresh(extend time.Duration) {
-	l.expiry = time.Now().Add(extend + time.Second*time.Duration(l.TTL))
+	t := monotime.Now().Add(extend + time.Duration(l.ttl)*time.Second)
+	atomic.StoreUint64((*uint64)(&l.expiry), uint64(t))
 }
 
 // forever sets the expiry of lease to be forever.
-func (l *Lease) forever() { l.expiry = forever }
+func (l *Lease) forever() { atomic.StoreUint64((*uint64)(&l.expiry), uint64(forever)) }
 
+// Keys returns all the keys attached to the lease.
+func (l *Lease) Keys() []string {
+	l.mu.RLock()
+	keys := make([]string, 0, len(l.itemSet))
+	for k := range l.itemSet {
+		keys = append(keys, k.Key)
+	}
+	l.mu.RUnlock()
+	return keys
+}
+
+// Remaining returns the remaining time of the lease.
+func (l *Lease) Remaining() time.Duration {
+	t := monotime.Time(atomic.LoadUint64((*uint64)(&l.expiry)))
+	return time.Duration(t - monotime.Now())
+}
+
 type LeaseItem struct {
 	Key string
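
The hunk above swaps the wall-clock `expiry time.Time` for a `monotime.Time` accessed through sync/atomic: refresh and forever store the 64-bit word, Remaining loads it, and the hot expiry-check path needs no mutex (hence the "must be 64-bit aligned" comment). A minimal standalone sketch of the same pattern, with hypothetical names and the standard wall clock standing in for the vendored monotime package:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// expiring keeps its deadline in a single uint64 word so concurrent
// refreshers and readers need only atomic loads and stores, no mutex.
type expiring struct {
	deadline uint64 // unix nanoseconds; must stay 64-bit aligned
}

func (e *expiring) refresh(ttl time.Duration) {
	atomic.StoreUint64(&e.deadline, uint64(time.Now().Add(ttl).UnixNano()))
}

func (e *expiring) remaining() time.Duration {
	d := atomic.LoadUint64(&e.deadline)
	return time.Duration(int64(d) - time.Now().UnixNano())
}

func main() {
	var e expiring
	e.refresh(time.Second)
	fmt.Println("expired:", e.remaining() <= 0) // false right after refresh
}
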
@@ -502,6 +584,7 @@ func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
 
 func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }
 
+func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 }
 func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }
 
 func (fl *FakeLessor) Promote(extend time.Duration) {}

@@ -20,7 +20,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -40,7 +40,7 @@ var (
 	// This only works for linux.
 	InitialMmapSize = int64(10 * 1024 * 1024 * 1024)
 
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/mvcc", "backend")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend")
 )
 
 const (
@@ -303,6 +303,7 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error {
 		}
 
 		tmpb, berr := tmptx.CreateBucketIfNotExists(next)
+		tmpb.FillPercent = 0.9 // for seq write in for each
 		if berr != nil {
 			return berr
 		}
@@ -319,6 +320,8 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error {
 				return err
 			}
 			tmpb = tmptx.Bucket(next)
+			tmpb.FillPercent = 0.9 // for seq write in for each
+
 				count = 0
 			}
 			return tmpb.Put(k, v)
@@ -334,7 +337,7 @@ func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, strin
 	if err != nil {
 		plog.Fatal(err)
 	}
-	tmpPath := path.Join(dir, "database")
+	tmpPath := filepath.Join(dir, "database")
 	return newBackend(tmpPath, batchInterval, batchLimit), tmpPath
 }

@@ -162,11 +162,26 @@ func (t *batchTx) commit(stop bool) {
 		if t.pending == 0 && !stop {
 			t.backend.mu.RLock()
 			defer t.backend.mu.RUnlock()
-			atomic.StoreInt64(&t.backend.size, t.tx.Size())
+
+			// batchTx.commit(true) calls *bolt.Tx.Commit, which
+			// initializes *bolt.Tx.db and *bolt.Tx.meta as nil,
+			// and subsequent *bolt.Tx.Size() call panics.
+			//
+			// This nil pointer reference panic happens when:
+			// 1. batchTx.commit(false) from newBatchTx
+			// 2. batchTx.commit(true) from stopping backend
+			// 3. batchTx.commit(false) from inflight mvcc Hash call
+			//
+			// Check if db is nil to prevent this panic
+			if t.tx.DB() != nil {
+				atomic.StoreInt64(&t.backend.size, t.tx.Size())
+			}
 			return
 		}
 		start := time.Now()
+		// gofail: var beforeCommit struct{}
 		err = t.tx.Commit()
+		// gofail: var afterCommit struct{}
 		commitDurations.Observe(time.Since(start).Seconds())
 		atomic.AddInt64(&t.backend.commits, 1)

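
The comment block added above documents a real crash path: bolt's Tx.Commit clears the transaction's db and meta fields, so a later Tx.Size() dereferences nil. The fix checks Tx.DB() before calling Size. A self-contained sketch of that defensive shape, with hypothetical types rather than the bolt API:

package main

import "fmt"

// tx mimics a transaction whose owner pointer is cleared on commit.
type tx struct{ db *struct{} }

func (t *tx) Commit()       { t.db = nil } // commit detaches the tx from its db
func (t *tx) DB() *struct{} { return t.db }

func (t *tx) Size() int64 {
	if t.db == nil {
		panic("tx.Size after commit") // what the nil check prevents
	}
	return 42
}

func main() {
	t := &tx{db: &struct{}{}}
	t.Commit()
	// Guard before touching Size, exactly as the commit path above does.
	if t.DB() != nil {
		fmt.Println(t.Size())
	} else {
		fmt.Println("skipped Size on committed tx")
	}
}
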
@@ -25,11 +25,11 @@ type index interface {
 	Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
 	Range(key, end []byte, atRev int64) ([][]byte, []revision)
 	Put(key []byte, rev revision)
-	Restore(key []byte, created, modified revision, ver int64)
 	Tombstone(key []byte, rev revision) error
 	RangeSince(key, end []byte, rev int64) []revision
 	Compact(rev int64) map[revision]struct{}
 	Equal(b index) bool
+	Insert(ki *keyIndex)
 }
 
 type treeIndex struct {
@@ -58,21 +58,6 @@ func (ti *treeIndex) Put(key []byte, rev revision) {
 	okeyi.put(rev.main, rev.sub)
 }
 
-func (ti *treeIndex) Restore(key []byte, created, modified revision, ver int64) {
-	keyi := &keyIndex{key: key}
-
-	ti.Lock()
-	defer ti.Unlock()
-	item := ti.tree.Get(keyi)
-	if item == nil {
-		keyi.restore(created, modified, ver)
-		ti.tree.ReplaceOrInsert(keyi)
-		return
-	}
-	okeyi := item.(*keyIndex)
-	okeyi.put(modified.main, modified.sub)
-}
-
 func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
 	keyi := &keyIndex{key: key}
 
@@ -215,3 +200,9 @@ func (a *treeIndex) Equal(bi index) bool {
 
 	return equal
 }
+
+func (ti *treeIndex) Insert(ki *keyIndex) {
+	ti.Lock()
+	defer ti.Unlock()
+	ti.tree.ReplaceOrInsert(ki)
+}

@@ -74,8 +74,9 @@ type store struct {
 	// the main revision of the last compaction
 	compactMainRev int64
 
 	tx    backend.BatchTx
 	txnID int64 // tracks the current txnID to verify txn operations
+	txnModify bool
 
 	// bytesBuf8 is a byte slice of length 8
 	// to avoid a repetitive allocation in saveIndex.
@@ -180,7 +181,6 @@ func (s *store) TxnBegin() int64 {
 	s.currentRev.sub = 0
 	s.tx = s.b.BatchTx()
 	s.tx.Lock()
-	s.saveIndex()
 
 	s.txnID = rand.Int63()
 	return s.txnID
@@ -203,6 +203,14 @@ func (s *store) txnEnd(txnID int64) error {
 		return ErrTxnIDMismatch
 	}
 
+	// only update index if the txn modifies the mvcc state.
+	// read only txn might execute with one write txn concurrently,
+	// it should not write its index to mvcc.
+	if s.txnModify {
+		s.saveIndex()
+	}
+	s.txnModify = false
+
 	s.tx.Unlock()
 	if s.currentRev.sub != 0 {
 		s.currentRev.main += 1
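
The txnModify flag added above is a plain dirty bit: put and deleteRange (later in this diff) set it, and txnEnd persists the index only when the transaction actually wrote, so a read-only txn running beside a write txn never flushes index state. A minimal sketch of the dirty-flag idea, with hypothetical types:

package main

import "fmt"

type kvstore struct {
	dirty   bool
	flushes int
}

func (s *kvstore) put(k, v string) { s.dirty = true /* ...apply the write... */ }

func (s *kvstore) txnEnd() {
	// flush bookkeeping only if this txn modified state
	if s.dirty {
		s.flushes++
	}
	s.dirty = false
}

func main() {
	s := &kvstore{}
	s.txnEnd() // read-only txn: no flush
	s.put("k", "v")
	s.txnEnd() // write txn: one flush
	fmt.Println("flushes:", s.flushes) // 1
}
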
@@ -314,20 +322,23 @@ func (s *store) Compact(rev int64) (<-chan struct{}, error) {
 	return ch, nil
 }
 
-func (s *store) Hash() (uint32, int64, error) {
-	s.b.ForceCommit()
+// DefaultIgnores is a map of keys to ignore in hash checking.
+var DefaultIgnores map[backend.IgnoreKey]struct{}
 
+func init() {
+	DefaultIgnores = map[backend.IgnoreKey]struct{}{
+		// consistent index might be changed due to v2 internal sync, which
+		// is not controllable by the user.
+		{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
+	}
+}
+
+func (s *store) Hash() (uint32, int64, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
+	s.b.ForceCommit()
 
-	// ignore hash consistent index field for now.
-	// consistent index might be changed due to v2 internal sync, which
-	// is not controllable by the user.
-	ignores := make(map[backend.IgnoreKey]struct{})
-	bk := backend.IgnoreKey{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}
-	ignores[bk] = struct{}{}
-
-	h, err := s.b.Hash(ignores)
+	h, err := s.b.Hash(DefaultIgnores)
 	rev := s.currentRev.main
 	return h, rev, err
 }
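
Hoisting the ignore set into the package-level DefaultIgnores documents why the consistent-index key is excluded (it can change on v2 internal sync, outside user control) and avoids rebuilding the map on every Hash call. A hedged sketch of hashing an ordered key space while skipping an ignore set; the types and CRC choice here are illustrative, not the backend API:

package main

import (
	"fmt"
	"hash/crc32"
)

type kv struct{ bucket, key, value string }
type ignoreKey struct{ bucket, key string }

// hashKVs folds an ordered key space into a CRC, skipping ignored keys
// such as a consistent-index key that changes outside user control.
func hashKVs(kvs []kv, ignores map[ignoreKey]struct{}) uint32 {
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	for _, e := range kvs {
		if _, ok := ignores[ignoreKey{e.bucket, e.key}]; ok {
			continue
		}
		h.Write([]byte(e.bucket))
		h.Write([]byte(e.key))
		h.Write([]byte(e.value))
	}
	return h.Sum32()
}

func main() {
	ignores := map[ignoreKey]struct{}{{"meta", "consistent_index"}: {}}
	kvs := []kv{
		{"key", "foo", "bar"},
		{"meta", "consistent_index", "12345"}, // skipped
	}
	fmt.Printf("%08x\n", hashKVs(kvs, ignores))
}
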
@@ -369,6 +380,11 @@ func (s *store) restore() error {
 
 	keyToLease := make(map[string]lease.LeaseID)
 
+	// use an unordered map to hold the temp index data to speed up
+	// the initial key index recovery.
+	// we will convert this unordered map into the tree index later.
+	unordered := make(map[string]*keyIndex, 100000)
+
 	// restore index
 	tx := s.b.BatchTx()
 	tx.Lock()
@@ -391,11 +407,20 @@ func (s *store) restore() error {
 		// restore index
 		switch {
 		case isTombstone(key):
-			s.kvindex.Tombstone(kv.Key, rev)
+			if ki, ok := unordered[string(kv.Key)]; ok {
+				ki.tombstone(rev.main, rev.sub)
+			}
 			delete(keyToLease, string(kv.Key))
 
 		default:
-			s.kvindex.Restore(kv.Key, revision{kv.CreateRevision, 0}, rev, kv.Version)
+			ki, ok := unordered[string(kv.Key)]
+			if ok {
+				ki.put(rev.main, rev.sub)
+			} else {
+				ki = &keyIndex{key: kv.Key}
+				ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version)
+				unordered[string(kv.Key)] = ki
+			}
 
 			if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease {
 				keyToLease[string(kv.Key)] = lid
@@ -408,6 +433,11 @@ func (s *store) restore() error {
 		s.currentRev = rev
 	}
 
+	// restore the tree index from the unordered index.
+	for _, v := range unordered {
+		s.kvindex.Insert(v)
+	}
+
 	// keys in the range [compacted revision -N, compaction] might all be deleted due to compaction.
 	// the correct revision should be set to compaction revision in the case, not the largest revision
 	// we have seen.
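
Recovery above becomes two-phase: key indexes are staged in a plain map while scanning the backend (O(1) updates per revision), then moved into the B-tree in one pass via the new treeIndex.Insert, instead of paying a tree lookup per revision through the removed Restore. A small sketch of that staging pattern, with a sorted slice standing in for the tree:

package main

import (
	"fmt"
	"sort"
)

type keyHistory struct {
	key  string
	revs []int64
}

func main() {
	scanned := []struct {
		key string
		rev int64
	}{{"b", 1}, {"a", 2}, {"b", 3}}

	// phase 1: stage per-key history in a map; O(1) per scanned revision
	staged := make(map[string]*keyHistory)
	for _, e := range scanned {
		h, ok := staged[e.key]
		if !ok {
			h = &keyHistory{key: e.key}
			staged[e.key] = h
		}
		h.revs = append(h.revs, e.rev)
	}

	// phase 2: move finished entries into the ordered index in one pass
	ordered := make([]*keyHistory, 0, len(staged))
	for _, h := range staged {
		ordered = append(ordered, h)
	}
	sort.Slice(ordered, func(i, j int) bool { return ordered[i].key < ordered[j].key })
	for _, h := range ordered {
		fmt.Println(h.key, h.revs)
	}
}
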
@@ -509,23 +539,18 @@ func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64, countOnly bool
 }
 
 func (s *store) put(key, value []byte, leaseID lease.LeaseID) {
+	s.txnModify = true
+
 	rev := s.currentRev.main + 1
 	c := rev
 	oldLease := lease.NoLease
 
 	// if the key exists before, use its previous created and
 	// get its previous leaseID
-	grev, created, ver, err := s.kvindex.Get(key, rev)
+	_, created, ver, err := s.kvindex.Get(key, rev)
 	if err == nil {
 		c = created.main
-		ibytes := newRevBytes()
-		revToBytes(grev, ibytes)
-		_, vs := s.tx.UnsafeRange(keyBucketName, ibytes, nil, 0)
-		var kv mvccpb.KeyValue
-		if err = kv.Unmarshal(vs[0]); err != nil {
-			plog.Fatalf("cannot unmarshal value: %v", err)
-		}
-		oldLease = lease.LeaseID(kv.Lease)
+		oldLease = s.le.GetLease(lease.LeaseItem{Key: string(key)})
 	}
 
 	ibytes := newRevBytes()
@@ -575,6 +600,8 @@ func (s *store) put(key, value []byte, leaseID lease.LeaseID) {
 }
 
 func (s *store) deleteRange(key, end []byte) int64 {
+	s.txnModify = true
+
 	rrev := s.currentRev.main
 	if s.currentRev.sub > 0 {
 		rrev += 1
@@ -615,17 +642,11 @@ func (s *store) delete(key []byte, rev revision) {
 	s.changes = append(s.changes, kv)
 	s.currentRev.sub += 1
 
-	ibytes = newRevBytes()
-	revToBytes(rev, ibytes)
-	_, vs := s.tx.UnsafeRange(keyBucketName, ibytes, nil, 0)
-
-	kv.Reset()
-	if err = kv.Unmarshal(vs[0]); err != nil {
-		plog.Fatalf("cannot unmarshal value: %v", err)
-	}
-
-	if lease.LeaseID(kv.Lease) != lease.NoLease {
-		err = s.le.Detach(lease.LeaseID(kv.Lease), []lease.LeaseItem{{Key: string(kv.Key)}})
+	item := lease.LeaseItem{Key: string(key)}
+	leaseID := s.le.GetLease(item)
+
+	if leaseID != lease.NoLease {
+		err = s.le.Detach(leaseID, []lease.LeaseItem{item})
 		if err != nil {
 			plog.Errorf("cannot detach %v", err)
 		}

@@ -31,7 +31,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 type Event_EventType int32
 
@@ -103,91 +105,91 @@ func init() {
 	proto.RegisterType((*Event)(nil), "mvccpb.Event")
 	proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
 }
-func (m *KeyValue) Marshal() (data []byte, err error) {
+func (m *KeyValue) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *KeyValue) MarshalTo(data []byte) (int, error) {
+func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.Key) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
-		i = encodeVarintKv(data, i, uint64(len(m.Key)))
-		i += copy(data[i:], m.Key)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
 	}
 	if m.CreateRevision != 0 {
-		data[i] = 0x10
+		dAtA[i] = 0x10
 		i++
-		i = encodeVarintKv(data, i, uint64(m.CreateRevision))
+		i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
 	}
 	if m.ModRevision != 0 {
-		data[i] = 0x18
+		dAtA[i] = 0x18
 		i++
-		i = encodeVarintKv(data, i, uint64(m.ModRevision))
+		i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
 	}
 	if m.Version != 0 {
-		data[i] = 0x20
+		dAtA[i] = 0x20
 		i++
-		i = encodeVarintKv(data, i, uint64(m.Version))
+		i = encodeVarintKv(dAtA, i, uint64(m.Version))
 	}
 	if len(m.Value) > 0 {
-		data[i] = 0x2a
+		dAtA[i] = 0x2a
 		i++
-		i = encodeVarintKv(data, i, uint64(len(m.Value)))
-		i += copy(data[i:], m.Value)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
+		i += copy(dAtA[i:], m.Value)
 	}
 	if m.Lease != 0 {
-		data[i] = 0x30
+		dAtA[i] = 0x30
 		i++
-		i = encodeVarintKv(data, i, uint64(m.Lease))
+		i = encodeVarintKv(dAtA, i, uint64(m.Lease))
 	}
 	return i, nil
 }
 
-func (m *Event) Marshal() (data []byte, err error) {
+func (m *Event) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *Event) MarshalTo(data []byte) (int, error) {
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.Type != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintKv(data, i, uint64(m.Type))
+		i = encodeVarintKv(dAtA, i, uint64(m.Type))
 	}
 	if m.Kv != nil {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
-		i = encodeVarintKv(data, i, uint64(m.Kv.Size()))
-		n1, err := m.Kv.MarshalTo(data[i:])
+		i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size()))
+		n1, err := m.Kv.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
 		i += n1
 	}
 	if m.PrevKv != nil {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintKv(data, i, uint64(m.PrevKv.Size()))
-		n2, err := m.PrevKv.MarshalTo(data[i:])
+		i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size()))
+		n2, err := m.PrevKv.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
@@ -196,31 +198,31 @@ func (m *Event) MarshalTo(data []byte) (int, error) {
 	return i, nil
 }
 
-func encodeFixed64Kv(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 }
-func encodeFixed32Kv(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 }
-func encodeVarintKv(data []byte, offset int, v uint64) int {
+func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 }
 func (m *KeyValue) Size() (n int) {
@@ -279,8 +281,8 @@ func sovKv(x uint64) (n int) {
 func sozKv(x uint64) (n int) {
 	return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
-func (m *KeyValue) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *KeyValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -292,7 +294,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -320,7 +322,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			byteLen |= (int(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -334,7 +336,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
 			if m.Key == nil {
 				m.Key = []byte{}
 			}
@@ -351,7 +353,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			m.CreateRevision |= (int64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -370,7 +372,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			m.ModRevision |= (int64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -389,7 +391,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			m.Version |= (int64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -408,7 +410,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			byteLen |= (int(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -422,7 +424,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Value = append(m.Value[:0], data[iNdEx:postIndex]...)
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
 			if m.Value == nil {
 				m.Value = []byte{}
 			}
@@ -439,7 +441,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			m.Lease |= (int64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -448,7 +450,7 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 			}
 		default:
 			iNdEx = preIndex
-			skippy, err := skipKv(data[iNdEx:])
+			skippy, err := skipKv(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -467,8 +469,8 @@ func (m *KeyValue) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func (m *Event) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Event) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -480,7 +482,7 @@ func (m *Event) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -508,7 +510,7 @@ func (m *Event) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			m.Type |= (Event_EventType(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -527,7 +529,7 @@ func (m *Event) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			msglen |= (int(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -544,7 +546,7 @@ func (m *Event) Unmarshal(data []byte) error {
 			if m.Kv == nil {
 				m.Kv = &KeyValue{}
 			}
-			if err := m.Kv.Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -560,7 +562,7 @@ func (m *Event) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			msglen |= (int(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -577,13 +579,13 @@ func (m *Event) Unmarshal(data []byte) error {
 			if m.PrevKv == nil {
 				m.PrevKv = &KeyValue{}
 			}
-			if err := m.PrevKv.Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipKv(data[iNdEx:])
+			skippy, err := skipKv(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -602,8 +604,8 @@ func (m *Event) Unmarshal(data []byte) error {
 	}
 	return nil
 }
-func skipKv(data []byte) (n int, err error) {
-	l := len(data)
+func skipKv(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		var wire uint64
@@ -614,7 +616,7 @@ func skipKv(data []byte) (n int, err error) {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -632,7 +634,7 @@ func skipKv(data []byte) (n int, err error) {
 				return 0, io.ErrUnexpectedEOF
 			}
 			iNdEx++
-			if data[iNdEx-1] < 0x80 {
+			if dAtA[iNdEx-1] < 0x80 {
 				break
 			}
 		}
@@ -649,7 +651,7 @@ func skipKv(data []byte) (n int, err error) {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			length |= (int(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -672,7 +674,7 @@ func skipKv(data []byte) (n int, err error) {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				innerWire |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -683,7 +685,7 @@ func skipKv(data []byte) (n int, err error) {
 			if innerWireType == 4 {
 				break
 			}
-			next, err := skipKv(data[start:])
+			next, err := skipKv(dAtA[start:])
 			if err != nil {
 				return 0, err
 			}
@@ -707,6 +709,8 @@ var (
 	ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) }
+
 var fileDescriptorKv = []byte{
 	// 303 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,

@@ -43,6 +43,7 @@ message Event {
   // A DELETE/EXPIRE event contains the deleted key with
   // its modification revision set to the revision of deletion.
   KeyValue kv = 2;
+
   // prev_kv holds the key-value pair before the event happens.
   KeyValue prev_kv = 3;
 }

@@ -35,7 +35,7 @@ const (
 )
 
 type watchable interface {
-	watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse) (*watcher, cancelFunc)
+	watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
 	progress(w *watcher)
 	rev() int64
 }
@@ -185,7 +185,7 @@ func (s *watchableStore) NewWatchStream() WatchStream {
 	}
 }
 
-func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse) (*watcher, cancelFunc) {
+func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
@@ -195,6 +195,7 @@ func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch c
 		minRev: startRev,
 		id:     id,
 		ch:     ch,
+		fcs:    fcs,
 	}
 
 	s.store.mu.Lock()
@@ -325,10 +326,9 @@ func (s *watchableStore) moveVictims() (moved int) {
 		for w, eb := range wb {
 			// watcher has observed the store up to, but not including, w.minRev
 			rev := w.minRev - 1
-			select {
-			case w.ch <- WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}:
+			if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
 				pendingEventsGauge.Add(float64(len(eb.evs)))
-			default:
+			} else {
 				if newVictim == nil {
 					newVictim = make(watcherBatch)
 				}
@@ -419,10 +419,9 @@ func (s *watchableStore) syncWatchers() {
 			w.minRev = eb.moreRev
 		}
 
-		select {
-		case w.ch <- WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}:
+		if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
 			pendingEventsGauge.Add(float64(len(eb.evs)))
-		default:
+		} else {
 			if victims == nil {
 				victims = make(watcherBatch)
 			}
@@ -480,10 +479,10 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
 		if eb.revs != 1 {
 			plog.Panicf("unexpected multiple revisions in notification")
 		}
-		select {
-		case w.ch <- WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}:
+
+		if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
 			pendingEventsGauge.Add(float64(len(eb.evs)))
-		default:
+		} else {
 			// move slow watcher to victims
 			w.minRev = rev + 1
 			if victim == nil {
@@ -516,12 +515,9 @@ func (s *watchableStore) progress(w *watcher) {
 	defer s.mu.Unlock()
 
 	if _, ok := s.synced.watchers[w]; ok {
-		select {
-		case w.ch <- WatchResponse{WatchID: w.id, Revision: s.rev()}:
-		default:
-			// If the ch is full, this watcher is receiving events.
-			// We do not need to send progress at all.
-		}
+		w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
+		// If the ch is full, this watcher is receiving events.
+		// We do not need to send progress at all.
 	}
 }
 
@@ -542,7 +538,40 @@ type watcher struct {
 	minRev int64
 	id     WatchID
 
+	fcs []FilterFunc
 	// a chan to send out the watch response.
 	// The chan might be shared with other watchers.
 	ch chan<- WatchResponse
 }
 
+func (w *watcher) send(wr WatchResponse) bool {
+	progressEvent := len(wr.Events) == 0
+
+	if len(w.fcs) != 0 {
+		ne := make([]mvccpb.Event, 0, len(wr.Events))
+		for i := range wr.Events {
+			filtered := false
+			for _, filter := range w.fcs {
+				if filter(wr.Events[i]) {
+					filtered = true
+					break
+				}
+			}
+			if !filtered {
+				ne = append(ne, wr.Events[i])
+			}
+		}
+		wr.Events = ne
+	}
+
+	// if all events are filtered out, we should send nothing.
+	if !progressEvent && len(wr.Events) == 0 {
+		return true
+	}
+	select {
+	case w.ch <- wr:
+		return true
+	default:
+		return false
+	}
+}

@@ -28,6 +28,9 @@ var (
 
 type WatchID int64
 
+// FilterFunc returns true if the given event should be filtered out.
+type FilterFunc func(e mvccpb.Event) bool
+
 type WatchStream interface {
 	// Watch creates a watcher. The watcher watches the events happening or
 	// happened on the given key or range [key, end) from the given startRev.
@@ -38,7 +41,7 @@ type WatchStream interface {
 	// The returned `id` is the ID of this watcher. It appears as WatchID
 	// in events that are sent to the created watcher through stream channel.
 	//
-	Watch(key, end []byte, startRev int64) WatchID
+	Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID
 
 	// Chan returns a chan. All watch response will be sent to the returned chan.
 	Chan() <-chan WatchResponse
@@ -96,7 +99,7 @@ type watchStream struct {
 
 // Watch creates a new watcher in the stream and returns its WatchID.
 // TODO: return error if ws is closed?
-func (ws *watchStream) Watch(key, end []byte, startRev int64) WatchID {
+func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID {
 	// prevent wrong range where key >= end lexicographically
 	// watch request with 'WithFromKey' has empty-byte range end
 	if len(end) != 0 && bytes.Compare(key, end) != -1 {
@@ -112,7 +115,7 @@ func (ws *watchStream) Watch(key, end []byte, startRev int64) WatchID {
 	id := ws.nextID
 	ws.nextID++
 
-	w, c := ws.watchable.watch(key, end, startRev, id, ws.ch)
+	w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
 
 	ws.cancels[id] = c
 	ws.watchers[id] = w

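
The variadic fcs parameter threads event filters from the public Watch call down to each watcher, and watcher.send (earlier in this diff) drops filtered events before they ever reach the channel. A standalone sketch of the filter chain, with a hypothetical event type in place of mvccpb.Event:

package main

import "fmt"

type event struct{ typ string }

// filterFunc reports whether an event should be dropped,
// mirroring the FilterFunc contract above.
type filterFunc func(event) bool

func applyFilters(evs []event, fcs ...filterFunc) []event {
	if len(fcs) == 0 {
		return evs
	}
	ne := make([]event, 0, len(evs))
	for _, ev := range evs {
		dropped := false
		for _, f := range fcs {
			if f(ev) {
				dropped = true
				break
			}
		}
		if !dropped {
			ne = append(ne, ev)
		}
	}
	return ne
}

func main() {
	noDeletes := func(e event) bool { return e.typ == "DELETE" }
	evs := []event{{"PUT"}, {"DELETE"}, {"PUT"}}
	fmt.Println(applyFilters(evs, noDeletes)) // [{PUT} {PUT}]
}
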
@@ -78,6 +78,10 @@ func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
 // newWatcherBatch maps watchers to their matched events. It enables quick
 // events look up by watcher.
 func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
+	if len(wg.watchers) == 0 {
+		return nil
+	}
+
 	wb := make(watcherBatch)
 	for _, ev := range evs {
 		for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {

@@ -447,6 +447,9 @@ func (ivt *IntervalTree) Contains(iv Interval) bool {
 
 // Stab returns a slice with all elements in the tree intersecting the interval.
 func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
+	if ivt.count == 0 {
+		return nil
+	}
 	f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true }
 	ivt.Visit(iv, f)
 	return ivs

@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cpuutil provides facilities for detecting cpu-specific features.
+package cpuutil

@@ -0,0 +1,36 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cpuutil
+
+import (
+	"encoding/binary"
+	"unsafe"
+)
+
+const intWidth int = int(unsafe.Sizeof(0))
+
+var byteOrder binary.ByteOrder
+
+// ByteOrder returns the byte order for the CPU's native endianness.
+func ByteOrder() binary.ByteOrder { return byteOrder }
+
+func init() {
+	var i int = 0x1
+	if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
+		byteOrder = binary.BigEndian
+	} else {
+		byteOrder = binary.LittleEndian
+	}
+}

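
The init function above probes native endianness by writing 0x1 into an int and inspecting its first byte through an unsafe pointer. A brief usage sketch, assuming this vendored package is importable as github.com/coreos/etcd/pkg/cpuutil:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/cpuutil"
)

func main() {
	// Encode a value in the CPU's native byte order, e.g. for binary
	// kernel interfaces (like netlink) that expect host order.
	b := make([]byte, 4)
	cpuutil.ByteOrder().PutUint32(b, 0x01020304)
	fmt.Printf("% x\n", b) // "04 03 02 01" on little-endian hosts
}
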
@@ -19,7 +19,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"
 	"sort"
 
 	"github.com/coreos/pkg/capnslog"
@@ -33,13 +33,13 @@ const (
 )
 
 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
 )
 
 // IsDirWriteable checks if dir is writable by writing and removing a file
 // to dir. It returns nil if dir is writable.
 func IsDirWriteable(dir string) error {
-	f := path.Join(dir, ".touch")
+	f := filepath.Join(dir, ".touch")
 	if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
 		return err
 	}

@@ -16,7 +16,7 @@ package fileutil
 
 import (
 	"os"
-	"path"
+	"path/filepath"
 	"sort"
 	"strings"
 	"time"
@@ -45,7 +45,7 @@ func purgeFile(dirname string, suffix string, max uint, interval time.Duration,
 			sort.Strings(newfnames)
 			fnames = newfnames
 			for len(newfnames) > int(max) {
-				f := path.Join(dirname, newfnames[0])
+				f := filepath.Join(dirname, newfnames[0])
 				l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
 				if err != nil {
 					break

@@ -13,7 +13,7 @@ import (
 	"net/http"
 )
 
-func RequestCanceler(rt http.RoundTripper, req *http.Request) func() {
+func RequestCanceler(req *http.Request) func() {
 	ch := make(chan struct{})
 	req.Cancel = ch
 
@@ -0,0 +1,6 @@
+// Copyright (C) 2016 Arista Networks, Inc.
+// Use of this source code is governed by the Apache License 2.0
+// that can be found in the COPYING file.
+
+// This file is intentionally empty.
+// It's a workaround for https://github.com/golang/go/issues/15006

@@ -0,0 +1,26 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monotime
+
+import (
+	"time"
+)
+
+// Time represents a point in monotonic time
+type Time uint64
+
+func (t Time) Add(d time.Duration) Time {
+	return Time(uint64(t) + uint64(d.Nanoseconds()))
+}

@@ -0,0 +1,24 @@
+// Copyright (C) 2016 Arista Networks, Inc.
+// Use of this source code is governed by the Apache License 2.0
+// that can be found in the COPYING file.
+
+// Package monotime provides a fast monotonic clock source.
+package monotime
+
+import (
+	_ "unsafe" // required to use //go:linkname
+)
+
+//go:noescape
+//go:linkname nanotime runtime.nanotime
+func nanotime() int64
+
+// Now returns the current time in nanoseconds from a monotonic clock.
+// The time returned is based on some arbitrary platform-specific point in the
+// past. The time returned is guaranteed to increase monotonically at a
+// constant rate, unlike time.Now() from the Go standard library, which may
+// slow down, speed up, jump forward or backward, due to NTP activity or leap
+// seconds.
+func Now() Time {
+	return Time(nanotime())
+}

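
The //go:linkname directive above binds the unexported nanotime to the runtime's monotonic clock (the accompanying empty assembly file is what permits linkname in this package). A usage sketch, assuming the vendored import path github.com/coreos/etcd/pkg/monotime; the measured duration stays correct even if the wall clock is stepped mid-measurement:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/monotime"
)

func main() {
	start := monotime.Now()
	time.Sleep(10 * time.Millisecond)
	// Durations come from subtracting two monotonic readings.
	elapsed := time.Duration(monotime.Now() - start)
	fmt.Println("elapsed:", elapsed)
}
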
@@ -43,17 +43,24 @@ func RecoverPort(port int) error {
 
 // SetLatency adds latency in millisecond scale with random variations.
 func SetLatency(ms, rv int) error {
+	ifces, err := GetDefaultInterfaces()
+	if err != nil {
+		return err
+	}
+
 	if rv > ms {
 		rv = 1
 	}
-	cmdStr := fmt.Sprintf("sudo tc qdisc add dev eth0 root netem delay %dms %dms distribution normal", ms, rv)
-	_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
-	if err != nil {
-		// the rule has already been added. Overwrite it.
-		cmdStr = fmt.Sprintf("sudo tc qdisc change dev eth0 root netem delay %dms %dms distribution normal", ms, rv)
+	for ifce := range ifces {
+		cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
 		_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
 		if err != nil {
-			return err
+			// the rule has already been added. Overwrite it.
+			cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
+			_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
+			if err != nil {
+				return err
+			}
 		}
 	}
 	return nil
@@ -61,6 +68,15 @@ func SetLatency(ms, rv int) error {
 
 // RemoveLatency resets latency configurations.
 func RemoveLatency() error {
-	_, err := exec.Command("/bin/sh", "-c", "sudo tc qdisc del dev eth0 root netem").Output()
-	return err
+	ifces, err := GetDefaultInterfaces()
+	if err != nil {
+		return err
+	}
+	for ifce := range ifces {
+		_, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
 }

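
Both latency helpers now derive the device list from GetDefaultInterfaces and render one tc netem command per interface, rather than hardcoding eth0. A sketch of just the command construction (no execution); the interface names here are assumptions:

package main

import "fmt"

// latencyCmds renders the netem commands the helpers above would run:
// one "add" per interface plus the "change" fallback used when the
// qdisc already exists.
func latencyCmds(ifces []string, ms, rv int) []string {
	cmds := make([]string, 0, 2*len(ifces))
	for _, ifce := range ifces {
		cmds = append(cmds,
			fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv),
			fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv),
		)
	}
	return cmds
}

func main() {
	for _, c := range latencyCmds([]string{"eth0", "eth1"}, 50, 5) {
		fmt.Println(c)
	}
}
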
@@ -20,22 +20,27 @@ import (
 	"net/url"
 	"reflect"
 	"sort"
+	"time"
+
+	"golang.org/x/net/context"
 
 	"github.com/coreos/etcd/pkg/types"
 	"github.com/coreos/pkg/capnslog"
 )
 
 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "netutil")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil")
 
 	// indirection for testing
 	resolveTCPAddr = net.ResolveTCPAddr
 )
 
+const retryInterval = time.Second
+
 // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
 // resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames
 // are resolved.
-func resolveTCPAddrs(urls [][]url.URL) ([][]url.URL, error) {
+func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
 	newurls := make([][]url.URL, 0)
 	for _, us := range urls {
 		nus := make([]url.URL, len(us))
// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
|
// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
|
||||||
// resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames
|
// resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames
|
||||||
// are resolved.
|
// are resolved.
|
||||||
func resolveTCPAddrs(urls [][]url.URL) ([][]url.URL, error) {
|
func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
|
||||||
newurls := make([][]url.URL, 0)
|
newurls := make([][]url.URL, 0)
|
||||||
for _, us := range urls {
|
for _, us := range urls {
|
||||||
nus := make([]url.URL, len(us))
|
nus := make([]url.URL, len(us))
|
||||||
|
|
@ -47,37 +52,52 @@ func resolveTCPAddrs(urls [][]url.URL) ([][]url.URL, error) {
|
||||||
nus[i] = *nu
|
nus[i] = *nu
|
||||||
}
|
}
|
||||||
for i, u := range nus {
|
for i, u := range nus {
|
||||||
host, _, err := net.SplitHostPort(u.Host)
|
h, err := resolveURL(ctx, u)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
plog.Errorf("could not parse url %s during tcp resolving", u.Host)
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if host == "localhost" {
|
if h != "" {
|
||||||
continue
|
nus[i].Host = h
|
||||||
}
|
}
|
||||||
if net.ParseIP(host) != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tcpAddr, err := resolveTCPAddr("tcp", u.Host)
|
|
||||||
if err != nil {
|
|
||||||
plog.Errorf("could not resolve host %s", u.Host)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
|
|
||||||
nus[i].Host = tcpAddr.String()
|
|
||||||
}
|
}
|
||||||
newurls = append(newurls, nus)
|
newurls = append(newurls, nus)
|
||||||
}
|
}
|
||||||
return newurls, nil
|
return newurls, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func resolveURL(ctx context.Context, u url.URL) (string, error) {
|
||||||
|
for ctx.Err() == nil {
|
||||||
|
host, _, err := net.SplitHostPort(u.Host)
|
||||||
|
if err != nil {
|
||||||
|
plog.Errorf("could not parse url %s during tcp resolving", u.Host)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if host == "localhost" || net.ParseIP(host) != nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
tcpAddr, err := resolveTCPAddr("tcp", u.Host)
|
||||||
|
if err == nil {
|
||||||
|
plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
|
||||||
|
return tcpAddr.String(), nil
|
||||||
|
}
|
||||||
|
plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
plog.Errorf("could not resolve host %s", u.Host)
|
||||||
|
return "", err
|
||||||
|
case <-time.After(retryInterval):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
// urlsEqual checks equality of url.URLS between two arrays.
|
// urlsEqual checks equality of url.URLS between two arrays.
|
||||||
// This check pass even if an URL is in hostname and opposite is in IP address.
|
// This check pass even if an URL is in hostname and opposite is in IP address.
|
||||||
func urlsEqual(a []url.URL, b []url.URL) bool {
|
func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) bool {
|
||||||
if len(a) != len(b) {
|
if len(a) != len(b) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
urls, err := resolveTCPAddrs([][]url.URL{a, b})
|
urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
@@ -93,7 +113,7 @@ func urlsEqual(a []url.URL, b []url.URL) bool {
 	return true
 }
 
-func URLStringsEqual(a []string, b []string) bool {
+func URLStringsEqual(ctx context.Context, a []string, b []string) bool {
 	if len(a) != len(b) {
 		return false
 	}
@ -114,7 +134,7 @@ func URLStringsEqual(a []string, b []string) bool {
|
||||||
urlsB = append(urlsB, *u)
|
urlsB = append(urlsB, *u)
|
||||||
}
|
}
|
||||||
|
|
||||||
return urlsEqual(urlsA, urlsB)
|
return urlsEqual(ctx, urlsA, urlsB)
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsNetworkTimeoutError(err error) bool {
|
func IsNetworkTimeoutError(err error) bool {
|
||||||
|
|
|
||||||
|
|
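With the context threaded through every comparison helper, callers now control how long the underlying DNS resolution may block. A hedged usage sketch, assuming the package lives at the etcd path visible elsewhere in this diff (github.com/coreos/etcd/pkg/netutil); the literal IP is an illustrative placeholder, not a guaranteed resolution of example.com:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	// Bound the comparison: resolveURL retries inside URLStringsEqual
	// until this deadline expires.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	a := []string{"http://example.com:2380"}
	b := []string{"http://93.184.216.34:2380"} // placeholder literal IP

	// Reports true when the hostname form resolves to the same
	// host:port as the literal-IP form.
	fmt.Println(netutil.URLStringsEqual(ctx, a, b))
}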
@@ -0,0 +1,33 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package netutil
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// GetDefaultHost fetches the a resolvable name that corresponds
+// to the machine's default routable interface
+func GetDefaultHost() (string, error) {
+	return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetDefaultInterfaces fetches the device name of default routable interface.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+	return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
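On non-Linux builds this stub always errors, so default-host lookup is strictly best-effort. A minimal caller sketch under the same assumed import path; the fallback address is chosen for this sketch, not prescribed by the package:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	host, err := netutil.GetDefaultHost()
	if err != nil {
		// On non-Linux platforms this branch is always taken.
		host = "0.0.0.0" // fallback assumed for this sketch
	}
	fmt.Println("default host:", host)
}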
@@ -0,0 +1,250 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package netutil
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+	"sort"
+	"syscall"
+
+	"github.com/coreos/etcd/pkg/cpuutil"
+)
+
+var errNoDefaultRoute = fmt.Errorf("could not find default route")
+var errNoDefaultHost = fmt.Errorf("could not find default host")
+var errNoDefaultInterface = fmt.Errorf("could not find default interface")
+
+// GetDefaultHost obtains the first IP address of machine from the routing table and returns the IP address as string.
+// An IPv4 address is preferred to an IPv6 address for backward compatibility.
+func GetDefaultHost() (string, error) {
+	rmsgs, rerr := getDefaultRoutes()
+	if rerr != nil {
+		return "", rerr
+	}
+
+	// prioritize IPv4
+	if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
+		if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
+			return host, err
+		}
+		delete(rmsgs, syscall.AF_INET)
+	}
+
+	// sort so choice is deterministic
+	var families []int
+	for family := range rmsgs {
+		families = append(families, int(family))
+	}
+	sort.Ints(families)
+
+	for _, f := range families {
+		family := uint8(f)
+		if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
+			return host, err
+		}
+	}
+
+	return "", errNoDefaultHost
+}
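Note how GetDefaultHost tries AF_INET first and only then walks the remaining address families in sorted order: Go randomizes map iteration, so sorting keeps the chosen host stable across runs. The same pattern in isolation, as a runnable stdlib-only sketch:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Map iteration order is randomized in Go, so pick the preferred
	// key first, then visit the rest in sorted order, as GetDefaultHost does.
	routes := map[int]string{10: "AF_INET6 route", 2: "AF_INET route"}

	const afInet = 2 // value of syscall.AF_INET
	if r, ok := routes[afInet]; ok {
		fmt.Println("preferred:", r)
		delete(routes, afInet)
	}

	var families []int
	for f := range routes {
		families = append(families, f)
	}
	sort.Ints(families)
	for _, f := range families {
		fmt.Println("fallback:", routes[f])
	}
}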
+func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) {
+	host, oif, err := parsePREFSRC(rmsg)
+	if host != "" || err != nil {
+		return host, err
+	}
+
+	// prefsrc not detected, fall back to getting address from iface
+	ifmsg, ierr := getIfaceAddr(oif, family)
+	if ierr != nil {
+		return "", ierr
+	}
+
+	attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+	if aerr != nil {
+		return "", aerr
+	}
+
+	for _, attr := range attrs {
+		// search for RTA_DST because ipv6 doesn't have RTA_SRC
+		if attr.Attr.Type == syscall.RTA_DST {
+			return net.IP(attr.Value).String(), nil
+		}
+	}
+
+	return "", nil
+}
+
+func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
+	dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
+	if err != nil {
+		return nil, err
+	}
+
+	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+	if msgErr != nil {
+		return nil, msgErr
+	}
+
+	routes := make(map[uint8]*syscall.NetlinkMessage)
+	rtmsg := syscall.RtMsg{}
+	for _, m := range msgs {
+		if m.Header.Type != syscall.RTM_NEWROUTE {
+			continue
+		}
+		buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
+		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil {
+			continue
+		}
+		if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN {
+			// zero-length Dst_len implies default route
+			msg := m
+			routes[rtmsg.Family] = &msg
+		}
+	}
+
+	if len(routes) > 0 {
+		return routes, nil
+	}
+
+	return nil, errNoDefaultRoute
+}
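getDefaultRoutes works by dumping the kernel routing table over a netlink RIB request and keeping only RTM_NEWROUTE entries whose Dst_len is zero (a default route) in the main table. A Linux-only sketch of the first half of that pipeline, using only the stdlib syscall package; it just counts route messages rather than decoding RtMsg:

// +build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Dump the routing table, as getDefaultRoutes does.
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
	if err != nil {
		panic(err)
	}
	msgs, err := syscall.ParseNetlinkMessage(dat)
	if err != nil {
		panic(err)
	}
	n := 0
	for _, m := range msgs {
		if m.Header.Type == syscall.RTM_NEWROUTE {
			n++
		}
	}
	fmt.Printf("%d route messages in the dump\n", n)
}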
+// Used to get an address of interface.
+func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
+	dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family))
+	if err != nil {
+		return nil, err
+	}
+
+	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+	if msgErr != nil {
+		return nil, msgErr
+	}
+
+	ifaddrmsg := syscall.IfAddrmsg{}
+	for _, m := range msgs {
+		if m.Header.Type != syscall.RTM_NEWADDR {
+			continue
+		}
+		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
+		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil {
+			continue
+		}
+		if ifaddrmsg.Index == idx {
+			return &m, nil
+		}
+	}
+
+	return nil, fmt.Errorf("could not find address for interface index %v", idx)
+
+}
+
+// Used to get a name of interface.
+func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) {
+	dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
+	if err != nil {
+		return nil, err
+	}
+
+	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+	if msgErr != nil {
+		return nil, msgErr
+	}
+
+	ifinfomsg := syscall.IfInfomsg{}
+	for _, m := range msgs {
+		if m.Header.Type != syscall.RTM_NEWLINK {
+			continue
+		}
+		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg])
+		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil {
+			continue
+		}
+		if ifinfomsg.Index == int32(idx) {
+			return &m, nil
+		}
+	}
+
+	return nil, fmt.Errorf("could not find link for interface index %v", idx)
+}
+
+// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+	interfaces := make(map[string]uint8)
+	rmsgs, rerr := getDefaultRoutes()
+	if rerr != nil {
+		return interfaces, rerr
+	}
+
+	for family, rmsg := range rmsgs {
+		_, oif, err := parsePREFSRC(rmsg)
+		if err != nil {
+			return interfaces, err
+		}
+
+		ifmsg, ierr := getIfaceLink(oif)
+		if ierr != nil {
+			return interfaces, ierr
+		}
+
+		attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+		if aerr != nil {
+			return interfaces, aerr
+		}
+
+		for _, attr := range attrs {
+			if attr.Attr.Type == syscall.IFLA_IFNAME {
+				// key is an interface name
+				// possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
+				interfaces[string(attr.Value[:len(attr.Value)-1])] += family
+			}
+		}
+	}
+	if len(interfaces) > 0 {
+		return interfaces, nil
+	}
+	return interfaces, errNoDefaultInterface
+}
+
+// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
+func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
+	var attrs []syscall.NetlinkRouteAttr
+	attrs, err = syscall.ParseNetlinkRouteAttr(m)
+	if err != nil {
+		return "", 0, err
+	}
+
+	for _, attr := range attrs {
+		if attr.Attr.Type == syscall.RTA_PREFSRC {
+			host = net.IP(attr.Value).String()
+		}
+		if attr.Attr.Type == syscall.RTA_OIF {
+			oif = cpuutil.ByteOrder().Uint32(attr.Value)
+		}
+		if host != "" && oif != uint32(0) {
+			break
+		}
+	}
+
+	if oif == 0 {
+		err = errNoDefaultRoute
+	}
+	return
+}
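As the comment in GetDefaultInterfaces notes, the returned map value is a sum of address families: 2 for AF_INET, 10 for AF_INET6, 12 when the interface carries a default route for both. A Linux-only caller sketch under the same assumed import path:

// +build linux

package main

import (
	"fmt"
	"syscall"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	ifaces, err := netutil.GetDefaultInterfaces()
	if err != nil {
		fmt.Println("no default interface:", err)
		return
	}
	for name, family := range ifaces {
		// family is AF_INET (2), AF_INET6 (10), or their sum (12).
		switch family {
		case syscall.AF_INET:
			fmt.Println(name, "IPv4 only")
		case syscall.AF_INET6:
			fmt.Println(name, "IPv6 only")
		default:
			fmt.Println(name, "dual-stack")
		}
	}
}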
@@ -18,7 +18,7 @@ package pbutil
 import "github.com/coreos/pkg/capnslog"

 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "flags")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil")
 )

 type Marshaler interface {
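The pbutil hunk fixes a copy-pasted logger registration: capnslog keys loggers by repo and package, so the old "flags" name would have misattributed pbutil's log lines. A minimal sketch of that naming convention (the exact rendered output depends on the configured formatter):

package main

import (
	"github.com/coreos/pkg/capnslog"
)

// Convention: first argument is the repo root, second the package
// path within it, so messages are attributed to pkg/pbutil.
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil")

func main() {
	plog.Infof("logger registered under the correct package name")
}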
Some files were not shown because too many files have changed in this diff