Tasks: Text Retrieval
Modalities: Text
Formats: parquet
Languages: code
Size: 10K - 100K

Dataset Viewer

Three columns: `id` (string, 95-167 chars) is a GitHub permalink to a source span; `text` (string, 69-15.9k chars) is the code at that span; `title` (string, a single distinct value, empty throughout this preview). Each sample row below is rendered as its permalink followed by its code.
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3compactor/periodic.go#L98-L172
```go
func (pc *Periodic) Run() {
compactInterval := pc.getCompactInterval()
retryInterval := pc.getRetryInterval()
retentions := pc.getRetentions()
go func() {
lastSuccess := pc.clock.Now()
baseInterval := pc.period
for {
pc.revs = append(pc.revs, pc.rg.Rev())
if len(pc.revs) > retentions {
pc.revs = pc.revs[1:] // pc.revs[0] is always the rev at pc.period ago
}
select {
case <-pc.ctx.Done():
return
case <-pc.clock.After(retryInterval):
pc.mu.Lock()
p := pc.paused
pc.mu.Unlock()
if p {
continue
}
}
if pc.clock.Now().Sub(lastSuccess) < baseInterval {
continue
}
// wait up to the initially given period before the first compaction
if baseInterval == pc.period {
baseInterval = compactInterval
}
rev := pc.revs[0]
if pc.lg != nil {
pc.lg.Info(
"starting auto periodic compaction",
zap.Int64("revision", rev),
zap.Duration("compact-period", pc.period),
)
} else {
plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, pc.period)
}
_, err := pc.c.Compact(pc.ctx, &pb.CompactionRequest{Revision: rev})
if err == nil || err == mvcc.ErrCompacted {
if pc.lg != nil {
pc.lg.Info(
"completed auto periodic compaction",
zap.Int64("revision", rev),
zap.Duration("compact-period", pc.period),
zap.Duration("took", time.Since(lastSuccess)),
)
} else {
plog.Noticef("Finished auto-compaction at revision %d", rev)
}
lastSuccess = pc.clock.Now()
} else {
if pc.lg != nil {
pc.lg.Warn(
"failed auto periodic compaction",
zap.Int64("revision", rev),
zap.Duration("compact-period", pc.period),
zap.Duration("retry-interval", retryInterval),
zap.Error(err),
)
} else {
plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
plog.Noticef("Retry after %v", retryInterval)
}
}
}
}()
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/register.go#L35-L62

```go
func Register(c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{} {
rm := rate.NewLimiter(rate.Limit(registerRetryRate), registerRetryRate)
donec := make(chan struct{})
go func() {
defer close(donec)
for rm.Wait(c.Ctx()) == nil {
ss, err := registerSession(c, prefix, addr, ttl)
if err != nil {
plog.Warningf("failed to create a session %v", err)
continue
}
select {
case <-c.Ctx().Done():
ss.Close()
return
case <-ss.Done():
plog.Warning("session expired; possible network partition or server restart")
plog.Warning("creating a new session to rejoin")
continue
}
}
}()
return donec
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry_interceptor.go#L325-L329

```go
func withRetryPolicy(rp retryPolicy) retryOption {
return retryOption{applyFunc: func(o *options) {
o.retryPolicy = rp
}}
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server_access_control.go#L37-L49

```go
func (ac *AccessController) OriginAllowed(origin string) bool {
ac.corsMu.RLock()
defer ac.corsMu.RUnlock()
if len(ac.CORS) == 0 { // allow all
return true
}
_, ok := ac.CORS["*"]
if ok {
return true
}
_, ok = ac.CORS[origin]
return ok
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/balancer.go#L174-L225

```go
func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
bb.mu.Lock()
defer bb.mu.Unlock()
old, ok := bb.scToSt[sc]
if !ok {
bb.lg.Warn(
"state change for an unknown subconn",
zap.String("balancer-id", bb.id),
zap.String("subconn", scToString(sc)),
zap.String("state", s.String()),
)
return
}
bb.lg.Info(
"state changed",
zap.String("balancer-id", bb.id),
zap.Bool("connected", s == connectivity.Ready),
zap.String("subconn", scToString(sc)),
zap.String("address", bb.scToAddr[sc].Addr),
zap.String("old-state", old.String()),
zap.String("new-state", s.String()),
)
bb.scToSt[sc] = s
switch s {
case connectivity.Idle:
sc.Connect()
case connectivity.Shutdown:
// When an address is removed by the resolver, the balancer calls RemoveSubConn but
// keeps the sc's state in scToSt. Remove the state for this sc here.
delete(bb.scToAddr, sc)
delete(bb.scToSt, sc)
}
oldAggrState := bb.currentState
bb.currentState = bb.csEvltr.recordTransition(old, s)
// Regenerate picker when one of the following happens:
// - this sc became ready from not-ready
// - this sc became not-ready from ready
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
if (s == connectivity.Ready) != (old == connectivity.Ready) ||
(bb.currentState == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
bb.regeneratePicker()
}
bb.currentConn.UpdateBalancerState(bb.currentState, bb.Picker)
return
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/batch.go#L111-L126

```go
func (wb *WriteBatch) Delete(k []byte) error {
wb.Lock()
defer wb.Unlock()
if err := wb.txn.Delete(k); err != ErrTxnTooBig {
return err
}
if err := wb.commit(); err != nil {
return err
}
if err := wb.txn.Delete(k); err != nil {
wb.err = err
return err
}
return nil
}
```
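The `ErrTxnTooBig` dance above is what lets a `WriteBatch` span many internal transactions. A minimal caller sketch, assuming the badger API of this commit's era (`DefaultOptions` as a struct value; `NewWriteBatch`, `Flush`, and `Cancel` as shown in `batch.go`); the paths are illustrative:

```go
package main

import (
	"log"

	"github.com/dgraph-io/badger"
)

func main() {
	opts := badger.DefaultOptions
	opts.Dir, opts.ValueDir = "/tmp/badger", "/tmp/badger"
	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	wb := db.NewWriteBatch()
	defer wb.Cancel() // safe even after a successful Flush

	// Delete commits and restarts the internal txn on ErrTxnTooBig,
	// so arbitrarily many deletes can be queued through one batch.
	if err := wb.Delete([]byte("stale-key")); err != nil {
		log.Fatal(err)
	}
	if err := wb.Flush(); err != nil { // commit whatever is still pending
		log.Fatal(err)
	}
}
```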
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L1502-L1519
```go
func (r *raft) checkQuorumActive() bool {
var act int
r.forEachProgress(func(id uint64, pr *Progress) {
if id == r.id { // self is always active
act++
return
}
if pr.RecentActive && !pr.IsLearner {
act++
}
pr.RecentActive = false
})
return act >= r.quorum()
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/watchable_store.go#L437-L465

```go
func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
var victim watcherBatch
for w, eb := range newWatcherBatch(&s.synced, evs) {
if eb.revs != 1 {
if s.store != nil && s.store.lg != nil {
s.store.lg.Panic(
"unexpected multiple revisions in watch notification",
zap.Int("number-of-revisions", eb.revs),
)
} else {
plog.Panicf("unexpected multiple revisions in notification")
}
}
if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
pendingEventsGauge.Add(float64(len(eb.evs)))
} else {
// move slow watcher to victims
w.minRev = rev + 1
if victim == nil {
victim = make(watcherBatch)
}
w.victim = true
victim[w] = eb
s.synced.delete(w)
slowWatcherGauge.Inc()
}
}
s.addVictim(victim)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/snapshot/v3_snapshot.go#L156-L205

```go
func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
if _, err = os.Stat(dbPath); err != nil {
return ds, err
}
db, err := bolt.Open(dbPath, 0400, &bolt.Options{ReadOnly: true})
if err != nil {
return ds, err
}
defer db.Close()
h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
if err = db.View(func(tx *bolt.Tx) error {
// check snapshot file integrity first
var dbErrStrings []string
for dbErr := range tx.Check() {
dbErrStrings = append(dbErrStrings, dbErr.Error())
}
if len(dbErrStrings) > 0 {
return fmt.Errorf("snapshot file integrity check failed. %d errors found.\n"+strings.Join(dbErrStrings, "\n"), len(dbErrStrings))
}
ds.TotalSize = tx.Size()
c := tx.Cursor()
for next, _ := c.First(); next != nil; next, _ = c.Next() {
b := tx.Bucket(next)
if b == nil {
return fmt.Errorf("cannot get hash of bucket %s", string(next))
}
h.Write(next)
iskeyb := (string(next) == "key")
b.ForEach(func(k, v []byte) error {
h.Write(k)
h.Write(v)
if iskeyb {
rev := bytesToRev(k)
ds.Revision = rev.main
}
ds.TotalKey++
return nil
})
}
return nil
}); err != nil {
return ds, err
}
ds.Hash = h.Sum32()
return ds, nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/mutex.go#L115-L117

```go
func NewLocker(s *Session, pfx string) sync.Locker {
return &lockerMutex{NewMutex(s, pfx)}
}
```
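`NewLocker` adapts the etcd mutex to the standard `sync.Locker` interface. A usage sketch, assuming this era's `go.etcd.io/etcd` import layout and an endpoint at `localhost:2379`:

```go
package main

import (
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// A session ties lock ownership to a lease kept alive in the background.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	l := concurrency.NewLocker(s, "/locks/demo") // satisfies sync.Locker
	l.Lock()
	// ... critical section, exclusive across every client using this prefix ...
	l.Unlock()
}
```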
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/util.go#L34-L36
```go
func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/builder.go#L179-L182

```go
func (b *Builder) ReachedCapacity(cap int64) bool {
estimateSz := b.buf.Len() + 8 /* empty header */ + 4*len(b.restarts) + 8 // 8 = end of buf offset + len(restarts).
return int64(estimateSz) > cap
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/del_command.go#L31-L42

```go
func NewDelCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "del [options] <key> [range_end]",
Short: "Removes the specified key or range of keys [key, range_end)",
Run: delCommandFunc,
}
cmd.Flags().BoolVar(&delPrefix, "prefix", false, "delete keys with matching prefix")
cmd.Flags().BoolVar(&delPrevKV, "prev-kv", false, "return deleted key-value pairs")
cmd.Flags().BoolVar(&delFromKey, "from-key", false, "delete keys that are greater than or equal to the given key using byte compare")
return cmd
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L84-L121

```go
func (m *Member) CreateEtcdClientConfig(opts ...grpc.DialOption) (cfg *clientv3.Config, err error) {
secure := false
for _, cu := range m.Etcd.AdvertiseClientURLs {
var u *url.URL
u, err = url.Parse(cu)
if err != nil {
return nil, err
}
if u.Scheme == "https" { // TODO: handle unix
secure = true
}
}
cfg = &clientv3.Config{
Endpoints: []string{m.EtcdClientEndpoint},
DialTimeout: 10 * time.Second,
DialOptions: opts,
}
if secure {
// assume TLS assets are already stored on disk
tlsInfo := transport.TLSInfo{
CertFile: m.ClientCertPath,
KeyFile: m.ClientKeyPath,
TrustedCAFile: m.ClientTrustedCAPath,
// TODO: remove this with generated certs
// only need it for auto TLS
InsecureSkipVerify: true,
}
var tlsConfig *tls.Config
tlsConfig, err = tlsInfo.ClientConfig()
if err != nil {
return nil, err
}
cfg.TLS = tlsConfig
}
return cfg, err
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/lease/lessor.go#L859-L866

```go
func (l *Lease) Remaining() time.Duration {
l.expiryMu.RLock()
defer l.expiryMu.RUnlock()
if l.expiry.IsZero() {
return time.Duration(math.MaxInt64)
}
return time.Until(l.expiry)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L293-L295

```go
func (rn *RawNode) ReadIndex(rctx []byte) {
_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/config_logging_journal_unix.go#L29-L35

```go
func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
jw, err := logutil.NewJournalWriter(os.Stderr)
if err != nil {
return nil, fmt.Errorf("can't find journal (%v)", err)
}
return zapcore.AddSync(jw), nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/role_command.go#L106-L117

```go
func roleAddCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
ExitWithError(ExitBadArgs, fmt.Errorf("role add command requires role name as its argument"))
}
resp, err := mustClientFromCmd(cmd).Auth.RoleAdd(context.TODO(), args[0])
if err != nil {
ExitWithError(ExitError, err)
}
display.RoleAdd(args[0], *resp)
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/iterator.go#L61-L67

```go
func (v *ValueStruct) Decode(b []byte) {
v.Meta = b[0]
v.UserMeta = b[1]
var sz int
v.ExpiresAt, sz = binary.Uvarint(b[2:])
v.Value = b[2+sz:]
}
```
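`Decode` reads a fixed layout: one meta byte, one user-meta byte, a uvarint expiry, and the remaining bytes as the value. A hand-rolled encoder producing that same layout (illustrative only, not badger's own `Encode`):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeValueStruct builds the byte layout that Decode consumes:
// meta byte, user-meta byte, uvarint ExpiresAt, then the raw value.
func encodeValueStruct(meta, userMeta byte, expiresAt uint64, value []byte) []byte {
	var expiry [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(expiry[:], expiresAt)
	b := make([]byte, 0, 2+n+len(value))
	b = append(b, meta, userMeta)
	b = append(b, expiry[:n]...)
	return append(b, value...)
}

func main() {
	b := encodeValueStruct(0x01, 0x00, 1735689600, []byte("hello"))
	fmt.Printf("% x\n", b)
}
```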
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/urls.go#L63-L65
```go
func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue))
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/lease.go#L472-L491

```go
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
if err != nil {
cancel()
return nil, err
}
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
go l.sendKeepAliveLoop(stream)
return stream, nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L156-L161

```go
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
defer c.mu.Unlock()
c.cfg.Endpoints = eps
c.resolverGroup.SetEndpoints(eps)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3rpc/watch.go#L572-L584

```go
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
for _, ft := range creq.Filters {
switch ft {
case pb.WatchCreateRequest_NOPUT:
filters = append(filters, filterNoPut)
case pb.WatchCreateRequest_NODELETE:
filters = append(filters, filterNoDelete)
default:
}
}
return filters
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L285-L295

```go
func (t *Throttle) Finish() error {
t.wg.Wait()
close(t.ch)
close(t.errCh)
for err := range t.errCh {
if err != nil {
return err
}
}
return nil
}
```
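`Finish` pairs with `Do` and `Done` on the same `Throttle`: `Do` reserves a slot before a goroutine starts, `Done` releases it and records any error. A bounded-concurrency sketch under that assumption about this era's `y` package:

```go
package main

import (
	"log"

	"github.com/dgraph-io/badger/y"
)

func main() {
	th := y.NewThrottle(3) // at most 3 workers in flight
	for i := 0; i < 10; i++ {
		if err := th.Do(); err != nil { // reserve a slot; blocks while full
			log.Fatal(err)
		}
		go func(i int) {
			var err error
			// ... do the work for item i, setting err ...
			th.Done(err) // release the slot, recording any failure
		}(i)
	}
	if err := th.Finish(); err != nil { // wait for all; first error wins
		log.Fatal(err)
	}
}
```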
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L207-L225
```go
func (rn *RawNode) HasReady() bool {
r := rn.raft
if !r.softState().equal(rn.prevSoftSt) {
return true
}
if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
return true
}
if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
return true
}
if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
return true
}
if len(r.readStates) != 0 {
return true
}
return false
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/stringutil/rand.go#L23-L34

```go
func UniqueStrings(slen uint, n int) (ss []string) {
exist := make(map[string]struct{})
ss = make([]string, 0, n)
for len(ss) < n {
s := randString(slen)
if _, ok := exist[s]; !ok {
ss = append(ss, s)
exist[s] = struct{}{}
}
}
return ss
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/set.go#L101-L108

```go
func (us *unsafeSet) Copy() Set {
cp := NewUnsafeSet()
for val := range us.d {
cp.Add(val)
}
return cp
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/txn_command.go#L45-L66

```go
func txnCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 0 {
ExitWithError(ExitBadArgs, fmt.Errorf("txn command does not accept argument"))
}
reader := bufio.NewReader(os.Stdin)
txn := mustClientFromCmd(cmd).Txn(context.Background())
promptInteractive("compares:")
txn.If(readCompares(reader)...)
promptInteractive("success requests (get, put, del):")
txn.Then(readOps(reader)...)
promptInteractive("failure requests (get, put, del):")
txn.Else(readOps(reader)...)
resp, err := txn.Commit()
if err != nil {
ExitWithError(ExitError, err)
}
display.Txn(*resp)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/membership/member.go#L78-L83

```go
func (m *Member) PickPeerURL() string {
if len(m.PeerURLs) == 0 {
panic("member should always have some peer url")
}
return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/selective_string.go#L77-L88

```go
func (ss *SelectiveStringsValue) Set(s string) error {
vs := strings.Split(s, ",")
for i := range vs {
if _, ok := ss.valids[vs[i]]; ok {
ss.vs = append(ss.vs, vs[i])
} else {
return fmt.Errorf("invalid value %q", vs[i])
}
}
sort.Strings(ss.vs)
return nil
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/level_handler.go#L261-L281

```go
func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
s.RLock()
defer s.RUnlock()
tables := make([]*table.Table, 0, len(s.tables))
for _, t := range s.tables {
if opt.pickTable(t) {
tables = append(tables, t)
}
}
if len(tables) == 0 {
return iters
}
if s.level == 0 {
// Remember to add in reverse order!
// The newer table at the end of s.tables should be added first as it takes precedence.
return appendIteratorsReversed(iters, tables, opt.Reverse)
}
return append(iters, table.NewConcatIterator(tables, opt.Reverse))
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/alarm_command.go#L25-L35

```go
func NewAlarmCommand() *cobra.Command {
ac := &cobra.Command{
Use: "alarm <subcommand>",
Short: "Alarm related commands",
}
ac.AddCommand(NewAlarmDisarmCommand())
ac.AddCommand(NewAlarmListCommand())
return ac
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3compactor/periodic.go#L213-L217

```go
func (pc *Periodic) Resume() {
pc.mu.Lock()
pc.paused = false
pc.mu.Unlock()
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/gw/rpc.pb.gw.go#L687-L835

```go
func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L148-L160

```go
func (o *oracle) hasConflict(txn *Txn) bool {
if len(txn.reads) == 0 {
return false
}
for _, ro := range txn.reads {
// A commit at the read timestamp is expected.
// But, any commit after the read timestamp should cause a conflict.
if ts, has := o.commits[ro]; has && ts > txn.readTs {
return true
}
}
return false
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/snap/snapshotter.go#L152-L215

```go
func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) {
b, err := ioutil.ReadFile(snapname)
if err != nil {
if lg != nil {
lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err))
} else {
plog.Errorf("cannot read file %v: %v", snapname, err)
}
return nil, err
}
if len(b) == 0 {
if lg != nil {
lg.Warn("failed to read empty snapshot file", zap.String("path", snapname))
} else {
plog.Errorf("unexpected empty snapshot")
}
return nil, ErrEmptySnapshot
}
var serializedSnap snappb.Snapshot
if err = serializedSnap.Unmarshal(b); err != nil {
if lg != nil {
lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err))
} else {
plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
}
return nil, err
}
if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
if lg != nil {
lg.Warn("failed to read empty snapshot data", zap.String("path", snapname))
} else {
plog.Errorf("unexpected empty snapshot")
}
return nil, ErrEmptySnapshot
}
crc := crc32.Update(0, crcTable, serializedSnap.Data)
if crc != serializedSnap.Crc {
if lg != nil {
lg.Warn("snap file is corrupt",
zap.String("path", snapname),
zap.Uint32("prev-crc", serializedSnap.Crc),
zap.Uint32("new-crc", crc),
)
} else {
plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
}
return nil, ErrCRCMismatch
}
var snap raftpb.Snapshot
if err = snap.Unmarshal(serializedSnap.Data); err != nil {
if lg != nil {
lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err))
} else {
plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
}
return nil, err
}
return &snap, nil
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/manifest.go#L329-L380

```go
func ReplayManifestFile(fp *os.File) (ret Manifest, truncOffset int64, err error) {
r := countingReader{wrapped: bufio.NewReader(fp)}
var magicBuf [8]byte
if _, err := io.ReadFull(&r, magicBuf[:]); err != nil {
return Manifest{}, 0, errBadMagic
}
if !bytes.Equal(magicBuf[0:4], magicText[:]) {
return Manifest{}, 0, errBadMagic
}
version := binary.BigEndian.Uint32(magicBuf[4:8])
if version != magicVersion {
return Manifest{}, 0,
fmt.Errorf("manifest has unsupported version: %d (we support %d)", version, magicVersion)
}
build := createManifest()
var offset int64
for {
offset = r.count
var lenCrcBuf [8]byte
_, err := io.ReadFull(&r, lenCrcBuf[:])
if err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
break
}
return Manifest{}, 0, err
}
length := binary.BigEndian.Uint32(lenCrcBuf[0:4])
var buf = make([]byte, length)
if _, err := io.ReadFull(&r, buf); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
break
}
return Manifest{}, 0, err
}
if crc32.Checksum(buf, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(lenCrcBuf[4:8]) {
break
}
var changeSet pb.ManifestChangeSet
if err := changeSet.Unmarshal(buf); err != nil {
return Manifest{}, 0, err
}
if err := applyChangeSet(&build, &changeSet); err != nil {
return Manifest{}, 0, err
}
}
return build, offset, err
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/proxy/server.go#L201-L280

```go
func NewServer(cfg ServerConfig) Server {
s := &server{
lg: cfg.Logger,
from: cfg.From,
to: cfg.To,
tlsInfo: cfg.TLSInfo,
dialTimeout: cfg.DialTimeout,
bufferSize: cfg.BufferSize,
retryInterval: cfg.RetryInterval,
readyc: make(chan struct{}),
donec: make(chan struct{}),
errc: make(chan error, 16),
pauseAcceptc: make(chan struct{}),
pauseTxc: make(chan struct{}),
pauseRxc: make(chan struct{}),
}
_, fromPort, err := net.SplitHostPort(cfg.From.Host)
if err == nil {
s.fromPort, _ = strconv.Atoi(fromPort)
}
var toPort string
_, toPort, err = net.SplitHostPort(cfg.To.Host)
if err == nil {
s.toPort, _ = strconv.Atoi(toPort)
}
if s.dialTimeout == 0 {
s.dialTimeout = defaultDialTimeout
}
if s.bufferSize == 0 {
s.bufferSize = defaultBufferSize
}
if s.retryInterval == 0 {
s.retryInterval = defaultRetryInterval
}
if s.lg == nil {
s.lg = defaultLogger
}
close(s.pauseAcceptc)
close(s.pauseTxc)
close(s.pauseRxc)
if strings.HasPrefix(s.from.Scheme, "http") {
s.from.Scheme = "tcp"
}
if strings.HasPrefix(s.to.Scheme, "http") {
s.to.Scheme = "tcp"
}
addr := fmt.Sprintf(":%d", s.fromPort)
if s.fromPort == 0 { // unix
addr = s.from.Host
}
var ln net.Listener
if !s.tlsInfo.Empty() {
ln, err = transport.NewListener(addr, s.from.Scheme, &s.tlsInfo)
} else {
ln, err = net.Listen(s.from.Scheme, addr)
}
if err != nil {
s.errc <- err
s.Close()
return s
}
s.listener = ln
s.closeWg.Add(1)
go s.listenAndServe()
s.lg.Info("started proxying", zap.String("from", s.From()), zap.String("to", s.To()))
return s
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L82-L103

```go
func checkPostResponse(resp *http.Response, body []byte, req *http.Request, to types.ID) error {
switch resp.StatusCode {
case http.StatusPreconditionFailed:
switch strings.TrimSuffix(string(body), "\n") {
case errIncompatibleVersion.Error():
plog.Errorf("request sent was ignored by peer %s (server version incompatible)", to)
return errIncompatibleVersion
case errClusterIDMismatch.Error():
plog.Errorf("request sent was ignored (cluster ID mismatch: remote[%s]=%s, local=%s)",
to, resp.Header.Get("X-Etcd-Cluster-ID"), req.Header.Get("X-Etcd-Cluster-ID"))
return errClusterIDMismatch
default:
return fmt.Errorf("unhandled error %q when precondition failed", string(body))
}
case http.StatusForbidden:
return errMemberRemoved
case http.StatusNoContent:
return nil
default:
return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String())
}
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/event_history.go#L43-L54

```go
func (eh *EventHistory) addEvent(e *Event) *Event {
eh.rwl.Lock()
defer eh.rwl.Unlock()
eh.Queue.insert(e)
eh.LastIndex = e.Index()
eh.StartIndex = eh.Queue.Events[eh.Queue.Front].Index()
return e
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/ioutil/pagewriter.go#L43-L51

```go
func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
return &PageWriter{
w: w,
pageOffset: pageOffset,
pageBytes: pageBytes,
buf: make([]byte, defaultBufferBytes+pageBytes),
bufWatermarkBytes: defaultBufferBytes,
}
}
```
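A caller sketch for `PageWriter`: wrap an `io.Writer` so output is buffered and flushed in page-aligned chunks. The `Flush` method and the `go.etcd.io/etcd/pkg/ioutil` import path are assumed from this era of the tree:

```go
package main

import (
	"log"
	"os"

	"go.etcd.io/etcd/pkg/ioutil"
)

func main() {
	f, err := os.Create("/tmp/segment")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// 4 KiB pages; the underlying writer currently sits at offset 0 within a page.
	pw := ioutil.NewPageWriter(f, 4096, 0)
	if _, err := pw.Write([]byte("record data...")); err != nil {
		log.Fatal(err)
	}
	if err := pw.Flush(); err != nil { // push out any buffered partial page
		log.Fatal(err)
	}
}
```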
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/runner/lock_racer_command.go#L29-L37
```go
func NewLockRacerCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "lock-racer [name of lock (defaults to 'racers')]",
Short: "Performs lock race operation",
Run: runRacerFunc,
}
cmd.Flags().IntVar(&totalClientConnections, "total-client-connections", 10, "total number of client connections")
return cmd
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/backend/batch_tx.go#L88-L90

```go
func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
t.unsafePut(bucketName, key, value, false)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/watch_command.go#L48-L86

```go
func watchCommandFunc(c *cli.Context, ki client.KeysAPI) {
if len(c.Args()) == 0 {
handleError(c, ExitBadArgs, errors.New("key required"))
}
key := c.Args()[0]
recursive := c.Bool("recursive")
forever := c.Bool("forever")
index := c.Int("after-index")
stop := false
w := ki.Watcher(key, &client.WatcherOptions{AfterIndex: uint64(index), Recursive: recursive})
sigch := make(chan os.Signal, 1)
signal.Notify(sigch, os.Interrupt)
go func() {
<-sigch
os.Exit(0)
}()
for !stop {
resp, err := w.Next(context.TODO())
if err != nil {
handleError(c, ExitServerError, err)
}
if resp.Node.Dir {
continue
}
if recursive {
fmt.Printf("[%s] %s\n", resp.Action, resp.Node.Key)
}
printResponseKey(resp, c.GlobalString("output"))
if !forever {
stop = true
}
}
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/queue.go#L43-L77

```go
func (q *Queue) Dequeue() (string, error) {
// TODO: fewer round trips by fetching more than one key
resp, err := q.client.Get(q.ctx, q.keyPrefix, v3.WithFirstRev()...)
if err != nil {
return "", err
}
kv, err := claimFirstKey(q.client, resp.Kvs)
if err != nil {
return "", err
} else if kv != nil {
return string(kv.Value), nil
} else if resp.More {
// missed some items, retry to read in more
return q.Dequeue()
}
// nothing yet; wait on elements
ev, err := WaitPrefixEvents(
q.client,
q.keyPrefix,
resp.Header.Revision,
[]mvccpb.Event_EventType{mvccpb.PUT})
if err != nil {
return "", err
}
ok, err := deleteRevKey(q.client, string(ev.Kv.Key), ev.Kv.ModRevision)
if err != nil {
return "", err
} else if !ok {
return q.Dequeue()
}
return string(ev.Kv.Value), err
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/builder.go#L47-L52

```go
func (h header) Encode(b []byte) {
binary.BigEndian.PutUint16(b[0:2], h.plen)
binary.BigEndian.PutUint16(b[2:4], h.klen)
binary.BigEndian.PutUint16(b[4:6], h.vlen)
binary.BigEndian.PutUint32(b[6:10], h.prev)
}
```
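For reference, the inverse under the same fixed 10-byte big-endian layout (illustrative; badger defines its own decoder next to `Encode`, and `encoding/binary` is assumed imported):

```go
// Decode mirrors Encode: two bytes each for the prefix, key, and value
// lengths, then four bytes for the previous-header offset.
func (h *header) Decode(b []byte) {
	h.plen = binary.BigEndian.Uint16(b[0:2])
	h.klen = binary.BigEndian.Uint16(b[2:4])
	h.vlen = binary.BigEndian.Uint16(b[4:6])
	h.prev = binary.BigEndian.Uint32(b[6:10])
}
```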
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/namespace/kv.go#L32-L34
```go
func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
return &kvPrefix{kv, prefix}
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L590-L607

```go
func (txn *Txn) Commit() error {
txn.commitPrecheck() // Precheck before discarding txn.
defer txn.Discard()
if len(txn.writes) == 0 {
return nil // Nothing to do.
}
txnCb, err := txn.commitAndSend()
if err != nil {
return err
}
// If batchSet failed, LSM would not have been updated. So, no need to rollback anything.
// TODO: What if some of the txns successfully make it to value log, but others fail.
// Nothing gets updated to LSM, until a restart happens.
return txnCb()
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/key_index.go#L127-L145

```go
func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error {
if ki.isEmpty() {
if lg != nil {
lg.Panic(
"'tombstone' got an unexpected empty keyIndex",
zap.String("key", string(ki.key)),
)
} else {
plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key))
}
}
if ki.generations[len(ki.generations)-1].isEmpty() {
return ErrRevisionNotFound
}
ki.put(lg, main, sub)
ki.generations = append(ki.generations, generation{})
keysGauge.Dec()
return nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L167-L183

```go
func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
if cc.NodeID == None {
return &pb.ConfState{Nodes: rn.raft.nodes(), Learners: rn.raft.learnerNodes()}
}
switch cc.Type {
case pb.ConfChangeAddNode:
rn.raft.addNode(cc.NodeID)
case pb.ConfChangeAddLearnerNode:
rn.raft.addLearner(cc.NodeID)
case pb.ConfChangeRemoveNode:
rn.raft.removeNode(cc.NodeID)
case pb.ConfChangeUpdateNode:
default:
panic("unexpected conf type")
}
return &pb.ConfState{Nodes: rn.raft.nodes(), Learners: rn.raft.learnerNodes()}
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L37-L39

```go
func (m *Member) ElectionTimeout() time.Duration {
return time.Duration(m.Etcd.ElectionTimeoutMs) * time.Millisecond
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/update_dir_command.go#L26-L39

```go
func NewUpdateDirCommand() cli.Command {
return cli.Command{
Name: "updatedir",
Usage: "update an existing directory",
ArgsUsage: "<key> <value>",
Flags: []cli.Flag{
cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
},
Action: func(c *cli.Context) error {
updatedirCommandFunc(c, mustNewKeyAPI(c))
return nil
},
}
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/metrics.go#L71-L98

```go
func HandleMetrics(mux *http.ServeMux, c *http.Client, eps []string) {
// randomly shuffle the endpoints
r := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
if len(eps) > 1 {
eps = shuffleEndpoints(r, eps)
}
pathMetrics := etcdhttp.PathMetrics
mux.HandleFunc(pathMetrics, func(w http.ResponseWriter, r *http.Request) {
target := fmt.Sprintf("%s%s", eps[0], pathMetrics)
if !strings.HasPrefix(target, "http") {
scheme := "http"
if r.TLS != nil {
scheme = "https"
}
target = fmt.Sprintf("%s://%s", scheme, target)
}
resp, err := c.Get(target)
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
return // without this, the deferred Close below would dereference a nil resp
}
defer resp.Body.Close()
w.Header().Set("Content-Type", "text/plain; version=0.0.4")
body, _ := ioutil.ReadAll(resp.Body)
fmt.Fprintf(w, "%s", body)
})
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go#L141-L289

```go
func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/index.go#L157-L185

```go
func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
keyi := &keyIndex{key: key}
ti.RLock()
defer ti.RUnlock()
if end == nil {
item := ti.tree.Get(keyi)
if item == nil {
return nil
}
keyi = item.(*keyIndex)
return keyi.since(ti.lg, rev)
}
endi := &keyIndex{key: end}
var revs []revision
ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
if len(endi.key) > 0 && !item.Less(endi) {
return false
}
curKeyi := item.(*keyIndex)
revs = append(revs, curKeyi.since(ti.lg, rev)...)
return true
})
sort.Sort(revisions(revs))
return revs
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/unique_strings.go#L57-L66

```go
func NewUniqueStringsValue(s string) (us *UniqueStringsValue) {
us = &UniqueStringsValue{Values: make(map[string]struct{})}
if s == "" {
return us
}
if err := us.Set(s); err != nil {
plog.Panicf("new UniqueStringsValue should never fail: %v", err)
}
return us
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/etcdhttp/metrics.go#L37-L40

```go
func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
mux.Handle(PathMetrics, promhttp.Handler())
mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/auth/options.go#L63-L94

```go
func (opts *jwtOptions) Parse(optMap map[string]string) error {
var err error
if ttl := optMap[optTTL]; ttl != "" {
opts.TTL, err = time.ParseDuration(ttl)
if err != nil {
return err
}
}
if file := optMap[optPublicKey]; file != "" {
opts.PublicKey, err = ioutil.ReadFile(file)
if err != nil {
return err
}
}
if file := optMap[optPrivateKey]; file != "" {
opts.PrivateKey, err = ioutil.ReadFile(file)
if err != nil {
return err
}
}
// signing method is a required field
method := optMap[optSignMethod]
opts.SignMethod = jwt.GetSigningMethod(method)
if opts.SignMethod == nil {
return ErrInvalidAuthMethod
}
return nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/expect/expect.go#L134-L136

```go
func (ep *ExpectProcess) Signal(sig os.Signal) error {
return ep.cmd.Process.Signal(sig)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/key_index.go#L372-L381

```go
func (g *generation) walk(f func(rev revision) bool) int {
l := len(g.revs)
for i := range g.revs {
ok := f(g.revs[l-i-1])
if !ok {
return l - i - 1
}
}
return -1
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L191-L195

```go
func NewCloser(initial int) *Closer {
ret := &Closer{closed: make(chan struct{})}
ret.waiting.Add(initial)
return ret
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/ready_wait.go#L21-L30

```go
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
select {
case <-ready:
return nil
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-clientCtx.Done():
return clientCtx.Err()
}
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L111-L114

```go
func NewCtxClient(ctx context.Context) *Client {
cctx, cancel := context.WithCancel(ctx)
return &Client{ctx: cctx, cancel: cancel}
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/watermark.go#L107-L109

```go
func (w *WaterMark) SetDoneUntil(val uint64) {
atomic.StoreUint64(&w.doneUntil, val)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/raftexample/raft.go#L318-L323

```go
func (rc *raftNode) stop() {
rc.stopHTTP()
close(rc.commitC)
close(rc.errorC)
rc.node.Stop()
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L2047-L2116

```go
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
cc.NodeID = raft.None
s.r.ApplyConfChange(cc)
return false, err
}
lg := s.getLogger()
*confState = *s.r.ApplyConfChange(cc)
switch cc.Type {
case raftpb.ConfChangeAddNode:
m := new(membership.Member)
if err := json.Unmarshal(cc.Context, m); err != nil {
if lg != nil {
lg.Panic("failed to unmarshal member", zap.Error(err))
} else {
plog.Panicf("unmarshal member should never fail: %v", err)
}
}
if cc.NodeID != uint64(m.ID) {
if lg != nil {
lg.Panic(
"got different member ID",
zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
zap.String("member-id-from-message", m.ID.String()),
)
} else {
plog.Panicf("nodeID should always be equal to member ID")
}
}
s.cluster.AddMember(m)
if m.ID != s.id {
s.r.transport.AddPeer(m.ID, m.PeerURLs)
}
case raftpb.ConfChangeRemoveNode:
id := types.ID(cc.NodeID)
s.cluster.RemoveMember(id)
if id == s.id {
return true, nil
}
s.r.transport.RemovePeer(id)
case raftpb.ConfChangeUpdateNode:
m := new(membership.Member)
if err := json.Unmarshal(cc.Context, m); err != nil {
if lg != nil {
lg.Panic("failed to unmarshal member", zap.Error(err))
} else {
plog.Panicf("unmarshal member should never fail: %v", err)
}
}
if cc.NodeID != uint64(m.ID) {
if lg != nil {
lg.Panic(
"got different member ID",
zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
zap.String("member-id-from-message", m.ID.String()),
)
} else {
plog.Panicf("nodeID should always be equal to member ID")
}
}
s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
if m.ID != s.id {
s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
}
}
return false, nil
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L415-L421

```go
func (itr *Iterator) Seek(key []byte) {
if !itr.reversed {
itr.seek(key)
} else {
itr.seekForPrev(key)
}
}
```

https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/iterator.go#L59-L61

```go
func (item *Item) String() string {
return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L542-L558

```go
func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
// Attach the commit as min(to.matched, r.committed).
// When the leader sends out heartbeat message,
// the receiver(follower) might not be matched with the leader
// or it might not have all the committed entries.
// The leader MUST NOT forward the follower's commit to
// an unmatched index.
commit := min(r.getProgress(to).Match, r.raftLog.committed)
m := pb.Message{
To: to,
Type: pb.MsgHeartbeat,
Commit: commit,
Context: ctx,
}
r.send(m)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/raft.go#L150-L154

```go
func (r *raftNode) tick() {
r.tickMu.Lock()
r.Tick()
r.tickMu.Unlock()
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/grpc1.7-health.go#L112-L146

```go
func NewGRPC17Health(
eps []string,
timeout time.Duration,
dialFunc DialFunc,
) *GRPC17Health {
notifyCh := make(chan []grpc.Address)
addrs := eps2addrs(eps)
hb := &GRPC17Health{
addrs: addrs,
eps: eps,
notifyCh: notifyCh,
readyc: make(chan struct{}),
healthCheck: func(ep string) (bool, error) { return grpcHealthCheck(ep, dialFunc) },
unhealthyHostPorts: make(map[string]time.Time),
upc: make(chan struct{}),
stopc: make(chan struct{}),
downc: make(chan struct{}),
donec: make(chan struct{}),
updateAddrsC: make(chan NotifyMsg),
hostPort2ep: getHostPort2ep(eps),
}
if timeout < minHealthRetryDuration {
timeout = minHealthRetryDuration
}
hb.healthCheckTimeout = timeout
close(hb.downc)
go hb.updateNotifyLoop()
hb.wg.Add(1)
go func() {
defer hb.wg.Done()
hb.updateUnhealthy()
}()
return hb
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/key.go#L62-L73

```go
func putNewKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (int64, error) {
cmp := v3.Compare(v3.Version(key), "=", 0)
req := v3.OpPut(key, val, v3.WithLease(leaseID))
txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit()
if err != nil {
return 0, err
}
if !txnresp.Succeeded {
return 0, ErrKeyExists
}
return txnresp.Header.Revision, nil
}
```
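The `Version(key) = 0` compare makes this a create-only put: the transaction succeeds at most once per key, and every later caller gets `ErrKeyExists`. A sketch of a call site inside the same package, with `cli` an existing client and `v3.NoLease` meaning no lease is attached:

```go
// Only the first creator wins; concurrent callers race on the compare.
rev, err := putNewKV(cli, "/jobs/42", "pending", v3.NoLease)
switch err {
case nil:
	fmt.Println("created at revision", rev)
case ErrKeyExists:
	fmt.Println("someone else created it first")
default:
	log.Fatal(err)
}
```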
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/logutil/zap_raft.go#L27-L36
```go
func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
if lcfg == nil {
return nil, errors.New("nil zap.Config")
}
lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
if err != nil {
return nil, err
}
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/role_command.go#L134-L146

```go
func roleGetCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
ExitWithError(ExitBadArgs, fmt.Errorf("role get command requires role name as its argument"))
}
name := args[0]
resp, err := mustClientFromCmd(cmd).Auth.RoleGet(context.TODO(), name)
if err != nil {
ExitWithError(ExitError, err)
}
display.RoleGet(name, *resp)
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2auth/auth.go#L521-L534

```go
func (r Role) merge(lg *zap.Logger, n Role) (Role, error) {
var out Role
var err error
if r.Role != n.Role {
return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
}
out.Role = r.Role
out.Permissions, err = r.Permissions.Grant(n.Grant)
if err != nil {
return out, err
}
out.Permissions, err = out.Permissions.Revoke(lg, n.Revoke)
return out, err
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/auth_command.go#L25-L35

```go
func NewAuthCommand() *cobra.Command {
ac := &cobra.Command{
Use: "auth <enable or disable>",
Short: "Enable or disable authentication",
}
ac.AddCommand(newAuthEnableCommand())
ac.AddCommand(newAuthDisableCommand())
return ac
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/picker/roundrobin_balanced.go#L28-L40

```go
func NewRoundrobinBalanced(
lg *zap.Logger,
scs []balancer.SubConn,
addrToSc map[resolver.Address]balancer.SubConn,
scToAddr map[balancer.SubConn]resolver.Address,
) Picker {
return &rrBalanced{
lg: lg,
scs: scs,
addrToSc: addrToSc,
scToAddr: scToAddr,
}
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/corrupt.go#L34-L140

```go
func (s *EtcdServer) CheckInitialHashKV() error {
if !s.Cfg.InitialCorruptCheck {
return nil
}
lg := s.getLogger()
if lg != nil {
lg.Info(
"starting initial corruption check",
zap.String("local-member-id", s.ID().String()),
zap.Duration("timeout", s.Cfg.ReqTimeout()),
)
} else {
plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
}
h, rev, crev, err := s.kv.HashByRev(0)
if err != nil {
return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
}
peers := s.getPeerHashKVs(rev)
mismatch := 0
for _, p := range peers {
if p.resp != nil {
peerID := types.ID(p.resp.Header.MemberId)
fields := []zap.Field{
zap.String("local-member-id", s.ID().String()),
zap.Int64("local-member-revision", rev),
zap.Int64("local-member-compact-revision", crev),
zap.Uint32("local-member-hash", h),
zap.String("remote-peer-id", peerID.String()),
zap.Strings("remote-peer-endpoints", p.eps),
zap.Int64("remote-peer-revision", p.resp.Header.Revision),
zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision),
zap.Uint32("remote-peer-hash", p.resp.Hash),
}
if h != p.resp.Hash {
if crev == p.resp.CompactRevision {
if lg != nil {
lg.Warn("found different hash values from remote peer", fields...)
} else {
plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
}
mismatch++
} else {
if lg != nil {
lg.Warn("found different compact revision values from remote peer", fields...)
} else {
plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
}
}
}
continue
}
if p.err != nil {
switch p.err {
case rpctypes.ErrFutureRev:
if lg != nil {
lg.Warn(
"cannot fetch hash from slow remote peer",
zap.String("local-member-id", s.ID().String()),
zap.Int64("local-member-revision", rev),
zap.Int64("local-member-compact-revision", crev),
zap.Uint32("local-member-hash", h),
zap.String("remote-peer-id", p.id.String()),
zap.Strings("remote-peer-endpoints", p.eps),
zap.Error(err),
)
} else {
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
}
case rpctypes.ErrCompacted:
if lg != nil {
lg.Warn(
"cannot fetch hash from remote peer; local member is behind",
zap.String("local-member-id", s.ID().String()),
zap.Int64("local-member-revision", rev),
zap.Int64("local-member-compact-revision", crev),
zap.Uint32("local-member-hash", h),
zap.String("remote-peer-id", p.id.String()),
zap.Strings("remote-peer-endpoints", p.eps),
zap.Error(err),
)
} else {
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
}
}
}
}
if mismatch > 0 {
return fmt.Errorf("%s found data inconsistency with peers", s.ID())
}
if lg != nil {
lg.Info(
"initial corruption checking passed; no corruption",
zap.String("local-member-id", s.ID().String()),
)
} else {
plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
}
return nil
}
```

https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/gw/rpc.pb.gw.go#L1737-L2204

```go
func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error {
mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/cluster_util.go#L181-L223
|
func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version {
var cv *semver.Version
lv := semver.Must(semver.NewVersion(version.Version))
for mid, ver := range vers {
if ver == nil {
return nil
}
v, err := semver.NewVersion(ver.Server)
if err != nil {
if lg != nil {
lg.Warn(
"failed to parse server version of remote member",
zap.String("remote-member-id", mid),
zap.String("remote-member-version", ver.Server),
zap.Error(err),
)
} else {
plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
}
return nil
}
if lv.LessThan(*v) {
if lg != nil {
lg.Warn(
"leader found higher-versioned member",
zap.String("local-member-version", lv.String()),
zap.String("remote-member-id", mid),
zap.String("remote-member-version", ver.Server),
)
} else {
plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
plog.Warningf("member %s has a higher version %s", mid, ver.Server)
}
}
if cv == nil {
cv = v
} else if v.LessThan(*cv) {
cv = v
}
}
return cv
}
| |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/value.go#L1333-L1339
|
func (vlog *valueLog) encodedDiscardStats() []byte {
vlog.lfDiscardStats.Lock()
defer vlog.lfDiscardStats.Unlock()
encodedStats, _ := json.Marshal(vlog.lfDiscardStats.m)
return encodedStats
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/watcher.go#L121-L129
|
func (w *watcher) post(wr *pb.WatchResponse) bool {
select {
case w.wps.watchCh <- wr:
case <-time.After(50 * time.Millisecond):
w.wps.cancel()
return false
}
return true
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/fileutil/purge.go#L32-L88
|
func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
errC := make(chan error, 1)
go func() {
for {
fnames, err := ReadDir(dirname)
if err != nil {
errC <- err
return
}
newfnames := make([]string, 0)
for _, fname := range fnames {
if strings.HasSuffix(fname, suffix) {
newfnames = append(newfnames, fname)
}
}
sort.Strings(newfnames)
fnames = newfnames
for len(newfnames) > int(max) {
f := filepath.Join(dirname, newfnames[0])
l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
if err != nil {
break
}
if err = os.Remove(f); err != nil {
errC <- err
return
}
if err = l.Close(); err != nil {
if lg != nil {
lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
} else {
plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
}
errC <- err
return
}
if lg != nil {
lg.Info("purged", zap.String("path", f))
} else {
plog.Infof("purged file %s successfully", f)
}
newfnames = newfnames[1:]
}
if purgec != nil {
for i := 0; i < len(fnames)-len(newfnames); i++ {
purgec <- fnames[i]
}
}
select {
case <-time.After(interval):
case <-stop:
return
}
}
}()
return errC
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/serve.go#L363-L367
|
func addCORSHeader(w http.ResponseWriter, origin string) {
w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
w.Header().Add("Access-Control-Allow-Origin", origin)
w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/user_command.go#L174-L199
|
func userGetCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
ExitWithError(ExitBadArgs, fmt.Errorf("user get command requires user name as its argument"))
}
name := args[0]
client := mustClientFromCmd(cmd)
resp, err := client.Auth.UserGet(context.TODO(), name)
if err != nil {
ExitWithError(ExitError, err)
}
if userShowDetail {
fmt.Printf("User: %s\n", name)
for _, role := range resp.Roles {
fmt.Printf("\n")
roleResp, err := client.Auth.RoleGet(context.TODO(), role)
if err != nil {
ExitWithError(ExitError, err)
}
display.RoleGet(role, *roleResp)
}
} else {
display.UserGet(name, *resp)
}
}
| |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/arena.go#L84-L93
|
func (s *Arena) putVal(v y.ValueStruct) uint32 {
l := uint32(v.EncodedSize())
n := atomic.AddUint32(&s.n, l)
y.AssertTruef(int(n) <= len(s.buf),
"Arena too small, toWrite:%d newTotal:%d limit:%d",
l, n, len(s.buf))
m := n - l
v.Encode(s.buf[m:])
return m
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/schedule/schedule.go#L119-L125
|
func (f *fifo) Stop() {
f.mu.Lock()
f.cancel()
f.cancel = nil
f.mu.Unlock()
<-f.donec
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L277-L619
|
func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
var (
w *wal.WAL
n raft.Node
s *raft.MemoryStorage
id types.ID
cl *membership.RaftCluster
)
if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
if cfg.Logger != nil {
cfg.Logger.Warn(
"exceeded recommended requet limit",
zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
zap.String("recommended-request-size", humanize.Bytes(uint64(recommendedMaxRequestBytes))),
)
} else {
plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
}
}
if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
haveWAL := wal.Exist(cfg.WALDir())
if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
if cfg.Logger != nil {
cfg.Logger.Fatal(
"failed to create snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
} else {
plog.Fatalf("create snapshot directory error: %v", err)
}
}
ss := snap.New(cfg.Logger, cfg.SnapDir())
bepath := cfg.backendPath()
beExist := fileutil.Exist(bepath)
be := openBackend(cfg)
defer func() {
if err != nil {
be.Close()
}
}()
prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
if err != nil {
return nil, err
}
var (
remotes []*membership.Member
snapshot *raftpb.Snapshot
)
switch {
case !haveWAL && !cfg.NewCluster:
if err = cfg.VerifyJoinExisting(); err != nil {
return nil, err
}
cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
if gerr != nil {
return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
}
if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
}
if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
return nil, fmt.Errorf("incompatible with current running cluster")
}
remotes = existingCluster.Members()
cl.SetID(types.ID(0), existingCluster.ID())
cl.SetStore(st)
cl.SetBackend(be)
id, n, s, w = startNode(cfg, cl, nil)
cl.SetID(id, existingCluster.ID())
case !haveWAL && cfg.NewCluster:
if err = cfg.VerifyBootstrap(); err != nil {
return nil, err
}
cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
m := cl.MemberByName(cfg.Name)
if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() {
var str string
str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
if err != nil {
return nil, &DiscoveryError{Op: "join", Err: err}
}
var urlsmap types.URLsMap
urlsmap, err = types.NewURLsMap(str)
if err != nil {
return nil, err
}
if checkDuplicateURL(urlsmap) {
return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
}
if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
return nil, err
}
}
cl.SetStore(st)
cl.SetBackend(be)
id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
cl.SetID(id, cl.ID())
case haveWAL:
if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
return nil, fmt.Errorf("cannot write to member directory: %v", err)
}
if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
}
if cfg.ShouldDiscover() {
if cfg.Logger != nil {
cfg.Logger.Warn(
"discovery token is ignored since cluster already initialized; valid logs are found",
zap.String("wal-dir", cfg.WALDir()),
)
} else {
plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
}
}
snapshot, err = ss.Load()
if err != nil && err != snap.ErrNoSnapshot {
return nil, err
}
if snapshot != nil {
if err = st.Recovery(snapshot.Data); err != nil {
if cfg.Logger != nil {
cfg.Logger.Panic("failed to recover from snapshot")
} else {
plog.Panicf("recovered store from snapshot error: %v", err)
}
}
if cfg.Logger != nil {
cfg.Logger.Info(
"recovered v2 store from snapshot",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
)
} else {
plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
}
if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
if cfg.Logger != nil {
cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
} else {
plog.Panicf("recovering backend from snapshot error: %v", err)
}
}
if cfg.Logger != nil {
s1, s2 := be.Size(), be.SizeInUse()
cfg.Logger.Info(
"recovered v3 backend from snapshot",
zap.Int64("backend-size-bytes", s1),
zap.String("backend-size", humanize.Bytes(uint64(s1))),
zap.Int64("backend-size-in-use-bytes", s2),
zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
)
}
}
if !cfg.ForceNewCluster {
id, cl, n, s, w = restartNode(cfg, snapshot)
} else {
id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
}
cl.SetStore(st)
cl.SetBackend(be)
cl.Recover(api.UpdateCapability)
if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
os.RemoveAll(bepath)
return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
}
default:
return nil, fmt.Errorf("unsupported bootstrap config")
}
if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}
sstats := stats.NewServerStats(cfg.Name, id.String())
lstats := stats.NewLeaderStats(id.String())
heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
srv = &EtcdServer{
readych: make(chan struct{}),
Cfg: cfg,
lgMu: new(sync.RWMutex),
lg: cfg.Logger,
errorc: make(chan error, 1),
v2store: st,
snapshotter: ss,
r: *newRaftNode(
raftNodeConfig{
lg: cfg.Logger,
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
heartbeat: heartbeat,
raftStorage: s,
storage: NewStorage(w, ss),
},
),
id: id,
attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
cluster: cl,
stats: sstats,
lstats: lstats,
SyncTicker: time.NewTicker(500 * time.Millisecond),
peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
}
serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
srv.be = be
minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
srv.lessor = lease.NewLessor(srv.getLogger(), srv.be, lease.LessorConfig{MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())), CheckpointInterval: cfg.LeaseCheckpointInterval})
srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex)
if beExist {
kvindex := srv.kv.ConsistentIndex()
// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
// etcd from pre-3.0 release.
if snapshot != nil && kvindex < snapshot.Metadata.Index {
if kvindex != 0 {
return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index)
}
if cfg.Logger != nil {
cfg.Logger.Warn(
"consistent index was never saved",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
)
} else {
plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
}
}
}
newSrv := srv // since srv == nil in defer if srv is returned as nil
defer func() {
// closing backend without first closing kv can cause
// resumed compactions to fail with closed tx errors
if err != nil {
newSrv.kv.Close()
}
}()
srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken,
func(index uint64) <-chan struct{} {
return srv.applyWait.Wait(index)
},
)
if err != nil {
if cfg.Logger != nil {
cfg.Logger.Warn("failed to create token provider", zap.Error(err))
} else {
plog.Errorf("failed to create token provider: %s", err)
}
return nil, err
}
srv.authStore = auth.NewAuthStore(srv.getLogger(), srv.be, tp, int(cfg.BcryptCost))
if num := cfg.AutoCompactionRetention; num != 0 {
srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv)
if err != nil {
return nil, err
}
srv.compactor.Run()
}
srv.applyV3Base = srv.newApplierV3Backend()
if err = srv.restoreAlarms(); err != nil {
return nil, err
}
srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
})
// TODO: move transport initialization near the definition of remote
tr := &rafthttp.Transport{
Logger: cfg.Logger,
TLSInfo: cfg.PeerTLSInfo,
DialTimeout: cfg.peerDialTimeout(),
ID: id,
URLs: cfg.PeerURLs,
ClusterID: cl.ID(),
Raft: srv,
Snapshotter: ss,
ServerStats: sstats,
LeaderStats: lstats,
ErrorC: srv.errorc,
}
if err = tr.Start(); err != nil {
return nil, err
}
// add all remotes into transport
for _, m := range remotes {
if m.ID != id {
tr.AddRemote(m.ID, m.PeerURLs)
}
}
for _, m := range cl.Members() {
if m.ID != id {
tr.AddPeer(m.ID, m.PeerURLs)
}
}
srv.r.transport = tr
return srv, nil
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L206-L217
|
func (m *Member) Compact(rev int64, timeout time.Duration) error {
cli, err := m.CreateEtcdClient()
if err != nil {
return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
}
defer cli.Close()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
_, err = cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
cancel()
return err
}
| |
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L212-L216
|
func (n *NetworkTransport) getStreamContext() context.Context {
n.streamCtxLock.RLock()
defer n.streamCtxLock.RUnlock()
return n.streamCtx
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/v3_server.go#L541-L561
|
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
ai, err := s.AuthInfoFromCtx(ctx)
if err != nil {
return err
}
if ai == nil {
// chk expects non-nil AuthInfo; use empty credentials
ai = &auth.AuthInfo{}
}
if err = chk(ai); err != nil {
return err
}
// fetch response for serialized request
get()
// check for stale token revision in case the auth store was updated while
// the request has been handled.
if ai.Revision != 0 && ai.Revision != s.authStore.Revision() {
return auth.ErrAuthOldRevision
}
return nil
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L334-L393
|
func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) {
var err *v2error.Error
s.worldLock.Lock()
defer s.worldLock.Unlock()
defer func() {
if err == nil {
s.Stats.Inc(DeleteSuccess)
reportWriteSuccess(Delete)
return
}
s.Stats.Inc(DeleteFail)
reportWriteFailure(Delete)
}()
nodePath = path.Clean(path.Join("/", nodePath))
// we do not allow the user to change "/"
if s.readonlySet.Contains(nodePath) {
return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
}
// recursive implies dir
if recursive {
dir = true
}
n, err := s.internalGet(nodePath)
if err != nil { // if the node does not exist, return error
return nil, err
}
nextIndex := s.CurrentIndex + 1
e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex)
e.EtcdIndex = nextIndex
e.PrevNode = n.Repr(false, false, s.clock)
eNode := e.Node
if n.IsDir() {
eNode.Dir = true
}
callback := func(path string) { // notify function
// notify the watchers with deleted set true
s.WatcherHub.notifyWatchers(e, path, true)
}
err = n.Remove(dir, recursive, callback)
if err != nil {
return nil, err
}
// update etcd index
s.CurrentIndex++
s.WatcherHub.notify(e)
return e, nil
}
| |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/iterator.go#L80-L87
|
func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) {
buf.WriteByte(v.Meta)
buf.WriteByte(v.UserMeta)
var enc [binary.MaxVarintLen64]byte
sz := binary.PutUvarint(enc[:], v.ExpiresAt)
buf.Write(enc[:sz])
buf.Write(v.Value)
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/watch.go#L483-L647
|
func (w *watchGrpcStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
// substreams marked to close but goroutine still running; needed for
// avoiding double-closing recvc on grpc stream teardown
closing := make(map[*watcherStream]struct{})
defer func() {
w.closeErr = closeErr
// shutdown substreams and resuming substreams
for _, ws := range w.substreams {
if _, ok := closing[ws]; !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
for _, ws := range w.resuming {
if _, ok := closing[ws]; ws != nil && !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
w.joinSubstreams()
for range closing {
w.closeSubstream(<-w.closingc)
}
w.wg.Wait()
w.owner.closeStream(w)
}()
// start a stream with the etcd grpc server
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
cancelSet := make(map[int64]struct{})
var cur *pb.WatchResponse
for {
select {
// Watch() requested
case req := <-w.reqc:
switch wreq := req.(type) {
case *watchRequest:
outc := make(chan WatchResponse, 1)
// TODO: pass custom watch ID?
ws := &watcherStream{
initReq: *wreq,
id: -1,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
}
ws.donec = make(chan struct{})
w.wg.Add(1)
go w.serveSubstream(ws, w.resumec)
// queue up for watcher creation/resume
w.resuming = append(w.resuming, ws)
if len(w.resuming) == 1 {
// head of resume queue, can register a new watcher
wc.Send(ws.initReq.toPB())
}
case *progressRequest:
wc.Send(wreq.toPB())
}
// new events from the watch client
case pbresp := <-w.respc:
if cur == nil || pbresp.Created || pbresp.Canceled {
cur = pbresp
} else if cur != nil && cur.WatchId == pbresp.WatchId {
// merge new events
cur.Events = append(cur.Events, pbresp.Events...)
// update "Fragment" field; last response with "Fragment" == false
cur.Fragment = pbresp.Fragment
}
switch {
case pbresp.Created:
// response to head of queue creation
if ws := w.resuming[0]; ws != nil {
w.addSubstream(pbresp, ws)
w.dispatchEvent(pbresp)
w.resuming[0] = nil
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
// reset for next iteration
cur = nil
case pbresp.Canceled && pbresp.CompactRevision == 0:
delete(cancelSet, pbresp.WatchId)
if ws, ok := w.substreams[pbresp.WatchId]; ok {
// signal to stream goroutine to update closingc
close(ws.recvc)
closing[ws] = struct{}{}
}
// reset for next iteration
cur = nil
case cur.Fragment:
// watch response events are still fragmented
// continue to fetch next fragmented event arrival
continue
default:
// dispatch to appropriate watch stream
ok := w.dispatchEvent(cur)
// reset for next iteration
cur = nil
if ok {
break
}
// watch response on unexpected watch id; cancel id
if _, ok := cancelSet[pbresp.WatchId]; ok {
break
}
cancelSet[pbresp.WatchId] = struct{}{}
cr := &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
WatchId: pbresp.WatchId,
},
}
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed on Recv; spawn another if possible
case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
return
}
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
cancelSet = make(map[int64]struct{})
case <-w.ctx.Done():
return
case ws := <-w.closingc:
w.closeSubstream(ws)
delete(closing, ws)
// no more watchers on this stream, shutdown
if len(w.substreams)+len(w.resuming) == 0 {
return
}
}
}
}
| |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L74-L95
|
func (itr *blockIterator) Seek(key []byte, whence int) {
itr.err = nil
switch whence {
case origin:
itr.Reset()
case current:
}
var done bool
for itr.Init(); itr.Valid(); itr.Next() {
k := itr.Key()
if y.CompareKeys(k, key) >= 0 {
// We are done as k is >= key.
done = true
break
}
}
if !done {
itr.err = io.EOF
}
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/tools/etcd-dump-logs/main.go#L145-L150
|
func excerpt(str string, pre, suf int) string {
if pre+suf > len(str) {
return fmt.Sprintf("%q", str)
}
return fmt.Sprintf("%q...%q", str[:pre], str[len(str)-suf:])
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/watcher_group.go#L81-L96
|
func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
if len(wg.watchers) == 0 {
return nil
}
wb := make(watcherBatch)
for _, ev := range evs {
for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
if ev.Kv.ModRevision >= w.minRev {
// don't double notify
wb.add(w, ev)
}
}
}
return wb
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/key_index.go#L304-L325
|
func (ki *keyIndex) findGeneration(rev int64) *generation {
lastg := len(ki.generations) - 1
cg := lastg
for cg >= 0 {
if len(ki.generations[cg].revs) == 0 {
cg--
continue
}
g := ki.generations[cg]
if cg != lastg {
if tomb := g.revs[len(g.revs)-1].main; tomb <= rev {
return nil
}
}
if g.revs[0].main <= rev {
return &ki.generations[cg]
}
cg--
}
return nil
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/backend.go#L103-L112
|
func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
var cIndex consistentIndex
kv := mvcc.New(cfg.Logger, oldbe, &lease.FakeLessor{}, &cIndex)
defer kv.Close()
if snapshot.Metadata.Index <= kv.ConsistentIndex() {
return oldbe, nil
}
oldbe.Close()
return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot)
}
| |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L587-L690
|
func mustNewMember(t testing.TB, mcfg memberConfig) *member {
var err error
m := &member{}
peerScheme := schemeFromTLSInfo(mcfg.peerTLS)
clientScheme := schemeFromTLSInfo(mcfg.clientTLS)
pln := newLocalListener(t)
m.PeerListeners = []net.Listener{pln}
m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.PeerTLSInfo = mcfg.peerTLS
cln := newLocalListener(t)
m.ClientListeners = []net.Listener{cln}
m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.ClientTLSInfo = mcfg.clientTLS
m.Name = mcfg.name
m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
if err != nil {
t.Fatal(err)
}
clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String())
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
t.Fatal(err)
}
m.InitialClusterToken = clusterName
m.NewCluster = true
m.BootstrapTimeout = 10 * time.Millisecond
if m.PeerTLSInfo != nil {
m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo
}
m.ElectionTicks = electionTicks
m.InitialElectionTickAdvance = true
m.TickMs = uint(tickDuration / time.Millisecond)
m.QuotaBackendBytes = mcfg.quotaBackendBytes
m.MaxTxnOps = mcfg.maxTxnOps
if m.MaxTxnOps == 0 {
m.MaxTxnOps = embed.DefaultMaxTxnOps
}
m.MaxRequestBytes = mcfg.maxRequestBytes
if m.MaxRequestBytes == 0 {
m.MaxRequestBytes = embed.DefaultMaxRequestBytes
}
m.SnapshotCount = etcdserver.DefaultSnapshotCount
if mcfg.snapshotCount != 0 {
m.SnapshotCount = mcfg.snapshotCount
}
m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries
if mcfg.snapshotCatchUpEntries != 0 {
m.SnapshotCatchUpEntries = mcfg.snapshotCatchUpEntries
}
// for the purpose of integration testing, simple token is enough
m.AuthToken = "simple"
if mcfg.authToken != "" {
m.AuthToken = mcfg.authToken
}
m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speedy up integration testing
m.grpcServerOpts = []grpc.ServerOption{}
if mcfg.grpcKeepAliveMinTime > time.Duration(0) {
m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: mcfg.grpcKeepAliveMinTime,
PermitWithoutStream: false,
}))
}
if mcfg.grpcKeepAliveInterval > time.Duration(0) &&
mcfg.grpcKeepAliveTimeout > time.Duration(0) {
m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{
Time: mcfg.grpcKeepAliveInterval,
Timeout: mcfg.grpcKeepAliveTimeout,
}))
}
m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize
m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize
m.useIP = mcfg.useIP
m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval
m.InitialCorruptCheck = true
lcfg := logutil.DefaultZapLoggerConfig
m.LoggerConfig = &lcfg
m.LoggerConfig.OutputPaths = []string{"/dev/null"}
m.LoggerConfig.ErrorOutputPaths = []string{"/dev/null"}
if os.Getenv("CLUSTER_DEBUG") != "" {
m.LoggerConfig.OutputPaths = []string{"stderr"}
m.LoggerConfig.ErrorOutputPaths = []string{"stderr"}
}
m.Logger, err = m.LoggerConfig.Build()
if err != nil {
t.Fatal(err)
}
return m
}
|
The dataset is a collection of code snippets and their corresponding natural language queries. The task is to retrieve the most relevant code snippet for a given query.
| | |
|---|---|
| Task category | t2t |
| Domains | Programming, Written |
| Reference | https://huggingface.co/datasets/code_search_net/ |
Source datasets:
- code_search_net (https://huggingface.co/datasets/code_search_net/)
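To see what a query and its relevant code document look like, here is a minimal inspection sketch using the mteb package. The keying of `task.queries` by subset ("python", ...) and then split ("test"), and the `"text"` field on corpus entries, are assumptions about the loaded format and may differ across mteb versions.

```python
# Minimal sketch: peek at one query/document pair of the "python" subset.
# Assumptions (not guaranteed by this card): task.load_data() populates
# task.queries / task.corpus / task.relevant_docs as dicts keyed first by
# subset ("python", ...) and then by split ("test"), and corpus entries
# carry their code under a "text" field.
import mteb

task = mteb.get_task("CodeSearchNetRetrieval")
task.load_data()

queries = task.queries["python"]["test"]      # query_id -> docstring-style query
corpus = task.corpus["python"]["test"]        # doc_id -> {"title": ..., "text": code}
qrels = task.relevant_docs["python"]["test"]  # query_id -> {doc_id: relevance}

qid, query = next(iter(queries.items()))
doc_id = next(iter(qrels[qid]))
print("query:   ", query[:80])
print("document:", corpus[doc_id]["text"][:80])
```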
How to evaluate on this task
You can evaluate an embedding model on this dataset using the following code:
```python
import mteb

task = mteb.get_task("CodeSearchNetRetrieval")
evaluator = mteb.MTEB([task])
model = mteb.get_model(YOUR_MODEL)
evaluator.run(model)
```
To learn more about how to run models on MTEB tasks, check out the GitHub repository.
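For a more concrete run, the sketch below fills in an example embedding model; the checkpoint name and output folder are illustrative choices, not part of this card.

```python
import mteb

# Any embedding model known to mteb works here; this sentence-transformers
# checkpoint is only an illustrative choice.
model = mteb.get_model("sentence-transformers/all-MiniLM-L6-v2")

task = mteb.get_task("CodeSearchNetRetrieval")
evaluator = mteb.MTEB([task])

# Writes per-task JSON result files (retrieval scores such as nDCG@10)
# under the given folder.
results = evaluator.run(model, output_folder="results")
```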
Citation
If you use this dataset, please cite the dataset as well as mteb, as this dataset likely includes additional processing as part of the MMTEB contribution.
```bibtex
@article{husain2019codesearchnet,
  author = {Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
  journal = {arXiv preprint arXiv:1909.09436},
  title = {{CodeSearchNet} challenge: Evaluating the state of semantic code search},
  year = {2019},
}

@article{enevoldsen2025mmtebmassivemultilingualtext,
  title = {MMTEB: Massive Multilingual Text Embedding Benchmark},
  author = {Kenneth Enevoldsen and Isaac Chung and Imene Kerboua and Márton Kardos and Ashwin Mathur and David Stap and Jay Gala and Wissam Siblini and Dominik Krzemiński and Genta Indra Winata and Saba Sturua and Saiteja Utpala and Mathieu Ciancone and Marion Schaeffer and Gabriel Sequeira and Diganta Misra and Shreeya Dhakal and Jonathan Rystrøm and Roman Solomatin and Ömer Çağatan and Akash Kundu and Martin Bernstorff and Shitao Xiao and Akshita Sukhlecha and Bhavish Pahwa and Rafał Poświata and Kranthi Kiran GV and Shawon Ashraf and Daniel Auras and Björn Plüster and Jan Philipp Harries and Loïc Magne and Isabelle Mohr and Mariya Hendriksen and Dawei Zhu and Hippolyte Gisserot-Boukhlef and Tom Aarsen and Jan Kostkan and Konrad Wojtasik and Taemin Lee and Marek Šuppa and Crystina Zhang and Roberta Rocca and Mohammed Hamdy and Andrianos Michail and John Yang and Manuel Faysse and Aleksei Vatolin and Nandan Thakur and Manan Dey and Dipam Vasani and Pranjal Chitale and Simone Tedeschi and Nguyen Tai and Artem Snegirev and Michael Günther and Mengzhou Xia and Weijia Shi and Xing Han Lù and Jordan Clive and Gayatri Krishnakumar and Anna Maksimova and Silvan Wehrli and Maria Tikhonova and Henil Panchal and Aleksandr Abramov and Malte Ostendorff and Zheng Liu and Simon Clematide and Lester James Miranda and Alena Fenogenova and Guangyu Song and Ruqiya Bin Safi and Wen-Ding Li and Alessia Borghini and Federico Cassano and Hongjin Su and Jimmy Lin and Howard Yen and Lasse Hansen and Sara Hooker and Chenghao Xiao and Vaibhav Adlakha and Orion Weller and Siva Reddy and Niklas Muennighoff},
  publisher = {arXiv},
  journal = {arXiv preprint arXiv:2502.13595},
  year = {2025},
  url = {https://arxiv.org/abs/2502.13595},
  doi = {10.48550/arXiv.2502.13595},
}

@article{muennighoff2022mteb,
  author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Loïc and Reimers, Nils},
  title = {MTEB: Massive Text Embedding Benchmark},
  publisher = {arXiv},
  journal = {arXiv preprint arXiv:2210.07316},
  year = {2022},
  url = {https://arxiv.org/abs/2210.07316},
  doi = {10.48550/ARXIV.2210.07316},
}
```
Dataset Statistics
The following are the descriptive statistics for this task. They can also be obtained using:
```python
import mteb

task = mteb.get_task("CodeSearchNetRetrieval")
desc_stats = task.metadata.descriptive_stats
```
```json
{
"test": {
"num_samples": 12000,
"number_of_characters": 6496327,
"documents_text_statistics": {
"total_text_length": 4552253,
"min_text_length": 69,
"average_text_length": 758.7088333333334,
"max_text_length": 334374,
"unique_texts": 6000
},
"documents_image_statistics": null,
"queries_text_statistics": {
"total_text_length": 1944074,
"min_text_length": 2,
"average_text_length": 324.01233333333334,
"max_text_length": 17533,
"unique_texts": 5765
},
"queries_image_statistics": null,
"relevant_docs_statistics": {
"num_relevant_docs": 6000,
"min_relevant_docs_per_query": 1,
"average_relevant_docs_per_query": 1.0,
"max_relevant_docs_per_query": 1,
"unique_relevant_docs": 6000
},
"top_ranked_statistics": null,
"hf_subset_descriptive_stats": {
"python": {
"num_samples": 2000,
"number_of_characters": 1329388,
"documents_text_statistics": {
"total_text_length": 862842,
"min_text_length": 91,
"average_text_length": 862.842,
"max_text_length": 10914,
"unique_texts": 1000
},
"documents_image_statistics": null,
"queries_text_statistics": {
"total_text_length": 466546,
"min_text_length": 8,
"average_text_length": 466.546,
"max_text_length": 8636,
"unique_texts": 982
},
"queries_image_statistics": null,
"relevant_docs_statistics": {
"num_relevant_docs": 1000,
"min_relevant_docs_per_query": 1,
"average_relevant_docs_per_query": 1.0,
"max_relevant_docs_per_query": 1,
"unique_relevant_docs": 1000
},
"top_ranked_statistics": null
},
"javascript": {
"num_samples": 2000,
"number_of_characters": 1601650,
"documents_text_statistics": {
"total_text_length": 1415632,
"min_text_length": 95,
"average_text_length": 1415.632,
"max_text_length": 334374,
"unique_texts": 1000
},
"documents_image_statistics": null,
"queries_text_statistics": {
"total_text_length": 186018,
"min_text_length": 2,
"average_text_length": 186.018,
"max_text_length": 7657,
"unique_texts": 951
},
"queries_image_statistics": null,
"relevant_docs_statistics": {
"num_relevant_docs": 1000,
"min_relevant_docs_per_query": 1,
"average_relevant_docs_per_query": 1.0,
"max_relevant_docs_per_query": 1,
"unique_relevant_docs": 1000
},
"top_ranked_statistics": null
},
"go": {
"num_samples": 2000,
"number_of_characters": 688942,
"documents_text_statistics": {
"total_text_length": 563729,
"min_text_length": 69,
"average_text_length": 563.729,
"max_text_length": 15904,
"unique_texts": 1000
},
"documents_image_statistics": null,
"queries_text_statistics": {
"total_text_length": 125213,
"min_text_length": 14,
"average_text_length": 125.213,
"max_text_length": 1501,
"unique_texts": 988
},
"queries_image_statistics": null,
"relevant_docs_statistics": {
"num_relevant_docs": 1000,
"min_relevant_docs_per_query": 1,
"average_relevant_docs_per_query": 1.0,
"max_relevant_docs_per_query": 1,
"unique_relevant_docs": 1000
},
"top_ranked_statistics": null
},
"ruby": {
"num_samples": 2000,
"number_of_characters": 891452,
"documents_text_statistics": {
"total_text_length": 577634,
"min_text_length": 79,
"average_text_length": 577.634,
"max_text_length": 8171,
"unique_texts": 1000
},
"documents_image_statistics": null,
"queries_text_statistics": {
"total_text_length": 313818,
"min_text_length": 5,
"average_text_length": 313.818,
"max_text_length": 17533,
"unique_texts": 978
},
"queries_image_statistics": null,
"relevant_docs_statistics": {
"num_relevant_docs": 1000,
"min_relevant_docs_per_query": 1,
"average_relevant_docs_per_query": 1.0,
"max_relevant_docs_per_query": 1,
"unique_relevant_docs": 1000
},
"top_ranked_statistics": null
},
"java": {
"num_samples": 2000,
"number_of_characters": 1110647,
"documents_text_statistics": {
"total_text_length": 420287,
"min_text_length": 106,
"average_text_length": 420.287,
"max_text_length": 9142,
"unique_texts": 1000
},
"documents_image_statistics": null,
"queries_text_statistics": {
"total_text_length": 690360,
"min_text_length": 2,
"average_text_length": 690.36,
"max_text_length": 6473,
"unique_texts": 956
},
"queries_image_statistics": null,
"relevant_docs_statistics": {
"num_relevant_docs": 1000,
"min_relevant_docs_per_query": 1,
"average_relevant_docs_per_query": 1.0,
"max_relevant_docs_per_query": 1,
"unique_relevant_docs": 1000
},
"top_ranked_statistics": null
},
"php": {
"num_samples": 2000,
"number_of_characters": 874248,
"documents_text_statistics": {
"total_text_length": 712129,
"min_text_length": 108,
"average_text_length": 712.129,
"max_text_length": 15584,
"unique_texts": 1000
},
"documents_image_statistics": null,
"queries_text_statistics": {
"total_text_length": 162119,
"min_text_length": 5,
"average_text_length": 162.119,
"max_text_length": 1240,
"unique_texts": 911
},
"queries_image_statistics": null,
"relevant_docs_statistics": {
"num_relevant_docs": 1000,
"min_relevant_docs_per_query": 1,
"average_relevant_docs_per_query": 1.0,
"max_relevant_docs_per_query": 1,
"unique_relevant_docs": 1000
},
"top_ranked_statistics": null
}
}
}
}
```
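Assuming `desc_stats` mirrors the JSON above, individual figures can be read out of it directly, for example:

```python
# Continues the snippet above; desc_stats is the dict printed as JSON.
test_stats = desc_stats["test"]
print(test_stats["num_samples"])  # 12000

# Average query length per language subset.
for lang, stats in test_stats["hf_subset_descriptive_stats"].items():
    print(lang, stats["queries_text_statistics"]["average_text_length"])
```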
This dataset card was automatically generated using MTEB